diff --git a/.circleci/config.yml b/.circleci/config.yml index 84b315593..25cd831a7 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,10 +3,14 @@ version: 2.1 orbs: slack: circleci/slack@3.4.2 +references: + images: + middleman: &MIDDLEMAN_IMAGE docker.mirror.hashicorp.services/hashicorp/middleman-hashicorp:0.3.44 + executors: go: docker: - - image: circleci/golang:1.15 + - image: docker.mirror.hashicorp.services/circleci/golang:1.15 environment: CONSUL_VERSION: 1.7.2 GOMAXPROCS: 4 @@ -176,6 +180,97 @@ jobs: name: test docker build for 'full' image command: docker build -t test-docker-full . + # Based on a similar job in terraform-website repo. + website-link-check: + docker: + - image: *MIDDLEMAN_IMAGE + steps: + - checkout: + path: terraform + + - run: + name: Determine changed website files, if any + working_directory: terraform + command: | + # Figure out what the current branch forked from. Compare against + # master and the set of "vX.Y" branches, and choose whichever branch + # we're the *fewest* commits ahead of. + # The point here isn't to perfectly predict where this will be + # merged; all we really care about is determining which commits are + # *unique to this PR,* so we don't accidentally complain about + # problems you had nothing to do with. + PARENT_BRANCH=$( + for br in $(git branch -rl --format='%(refname:short)' | grep -E '^origin/(master|v\d+\.\d+)$'); do + new_commits=$(git rev-list --first-parent ^${br} HEAD | wc -l); + echo "${br} ${new_commits}"; + done \ + | sort -n -k2 \ + | head -n1 \ + | awk '{print $1}'; + ) + echo "Checking current branch against: ${PARENT_BRANCH}" + MERGE_BASE=$(git merge-base HEAD ${PARENT_BRANCH}) + git diff --name-only -z --diff-filter=AMRCT ${MERGE_BASE}..HEAD -- ./website/ > /tmp/changed-website-files.txt + # --name-only: Return a list of affected files but don't show the changes. + # -z: Make that a null-separated list (instead of newline-separated), and + # DON'T mangle non-ASCII characters. 
+ # --diff-filter=AMRCT: Only list files that were added, modified, renamed, + # copied, or had their type changed (file, symlink, etc.). In + # particular, we don't want to check deleted files. + # ${MERGE_BASE}..HEAD: Only consider files that have + # changed since this branch diverged from its parent branch. + # -- ./website/: Only consider files in the website directory. + echo "Changed website files:" + cat /tmp/changed-website-files.txt | tr '\0' '\n' + # Need to use "tr" for display because it's a null-separated list. + + - run: + name: Exit early if there's nothing to check + command: | + if [ ! -s /tmp/changed-website-files.txt ]; then + circleci-agent step halt + fi + + - run: + name: Check out terraform-website repo + command: git clone git@github.com:hashicorp/terraform-website.git + + - run: + name: Use local checkout for terraform submodule, instead of cloning again + working_directory: terraform-website + command: | + # Set submodule's URL to our existing checkout. + # (Using `pwd` because git's behavior with strictly relative paths is unreliable.) + git config --file=.gitmodules submodule.ext/terraform.url $(pwd)/../terraform/.git + # Make it so `make sync` will grab our current branch instead of stable-website. + git config --file=.gitmodules submodule.ext/terraform.branch HEAD + + - run: + name: Init/update terraform-website submodules + working_directory: terraform-website + command: make sync + + - run: + name: Set up terraform-website dependencies + working_directory: terraform-website/content + # If this does anything interesting, then the container needs an update. 
+ command: bundle check || bundle install --path vendor/bundle --retry=3 + + - run: + name: Run middleman in background + working_directory: terraform-website/content + background: true + command: bundle exec middleman server + + - run: + name: Wait for server to start + command: until curl -sS http://localhost:4567/ > /dev/null; do sleep 1; done + + - run: + name: Check links in changed pages + working_directory: terraform-website/content + command: cat /tmp/changed-website-files.txt | bundle exec ./scripts/check-pr-links.rb + workflows: version: 2 test: @@ -203,3 +298,7 @@ workflows: - build-386 - build-amd64 - build-arm + + website-test: + jobs: + - website-link-check diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 7cb318a90..a63fd0579 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -64,7 +64,7 @@ The Terraform team is not merging PRs for new state storage backends at the curr Please see the [CODEOWNERS](https://github.com/hashicorp/terraform/blob/master/CODEOWNERS) file for the status of a given backend. Community members with an interest in a particular standard backend are welcome to help maintain it. -Currently, merging state storage backends places a significant burden on the Terraform team. The team must setup an environment and cloud service provider account, or a new database/storage/key-value service, in order to build and test remote state storage backends. The time and complexity of doing so prevents us from moving Terraform forward in other ways. +Currently, merging state storage backends places a significant burden on the Terraform team. The team must set up an environment and cloud service provider account, or a new database/storage/key-value service, in order to build and test remote state storage backends. The time and complexity of doing so prevents us from moving Terraform forward in other ways. 
We are working to remove ourselves from the critical path of state storage backends by moving them towards a plugin model. In the meantime, we won't be accepting new remote state backends into Terraform. diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 000000000..08b438da4 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,21 @@ +--- +name: Backport Assistant Runner + +on: + pull_request_target: + types: + - closed + +jobs: + backport: + if: github.event.pull_request.merged + runs-on: ubuntu-latest + container: hashicorpdev/backport-assistant:0.2.1 + steps: + - name: Run Backport Assistant + run: | + backport-assistant backport + env: + BACKPORT_LABEL_REGEXP: "(?P<target>\\d+\\.\\d+)-backport" + BACKPORT_TARGET_TEMPLATE: "v{{.target}}" + GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} diff --git a/.tfdev b/.tfdev index a04d5c102..857b02d99 100644 --- a/.tfdev +++ b/.tfdev @@ -1,5 +1,4 @@ version_info { - commit_var = "main.GitCommit" version_var = "github.com/hashicorp/terraform/version.Version" prerelease_var = "github.com/hashicorp/terraform/version.Prerelease" } diff --git a/BUGPROCESS.md b/BUGPROCESS.md index adb8ec6ea..2126f4fbf 100644 --- a/BUGPROCESS.md +++ b/BUGPROCESS.md @@ -7,7 +7,7 @@ When a bug report is filed, our goal is to either: ## Process -### 1. [Newly created issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Anew+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3A%22waiting+for+reproduction%22+-label%3A%22waiting-response%22+-label%3Aexplained) require initial filtering. +### 1. 
[Newly created issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Anew+label%3Abug+-label%3Abackend%2Fk8s+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3A%22waiting+for+reproduction%22+-label%3A%22waiting-response%22+-label%3Aexplained+) require initial filtering. These are raw reports that need categorization and support clarifying them. They need the following done: @@ -20,7 +20,7 @@ If an issue requires discussion with the user to get it out of this initial stat Once this initial filtering has been done, remove the new label. If an issue subjectively looks very high-impact and likely to impact many users, assign it to the [appropriate milestone](https://github.com/hashicorp/terraform/milestones) to mark it as being urgent. -### 2. Clarify [unreproduced issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+created%3A%3E2020-05-01+-label%3Aprovisioner%2Fsalt-masterless+-label%3Adocumentation+-label%3Aprovider%2Fazuredevops+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3Anew+-label%3A%22waiting+for+reproduction%22+-label%3Awaiting-response+-label%3Aexplained+sort%3Acreated-asc) +### 2. 
Clarify [unreproduced issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+created%3A%3E2020-05-01+-label%3Abackend%2Fk8s+-label%3Aprovisioner%2Fsalt-masterless+-label%3Adocumentation+-label%3Aprovider%2Fazuredevops+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3Anew+-label%3A%22waiting+for+reproduction%22+-label%3Awaiting-response+-label%3Aexplained+sort%3Acreated-asc+) A core team member initially determines whether the issue is immediately reproducible. If they cannot readily reproduce it, they label it "waiting for reproduction" and correspond with the reporter to describe what is needed. When the issue is reproduced by a core team member, they label it "confirmed". @@ -29,15 +29,15 @@ A core team member initially determines whether the issue is immediately reprodu Note that the link above excludes issues reported before May 2020; this is to avoid including issues that were reported prior to this new process being implemented. 
[Unreproduced issues reported before May 2020](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+created%3A%3C2020-05-01+-label%3Aprovisioner%2Fsalt-masterless+-label%3Adocumentation+-label%3Aprovider%2Fazuredevops+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3Anew+-label%3A%22waiting+for+reproduction%22+-label%3Awaiting-response+-label%3Aexplained+sort%3Areactions-%2B1-desc) will be triaged as capacity permits. -### 3. Explain or fix [confirmed issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Aexplained+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+) +### 3. 
Explain or fix [confirmed issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Aexplained+-label%3Abackend%2Foss+-label%3Abackend%2Fk8s+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+) The next step for confirmed issues is to either: * explain why the behavior is expected, label the issue as "working as designed", and close it, or * locate the cause of the defect in the codebase. When the defect is located, and that description is posted on the issue, the issue is labeled "explained". In many cases, this step will get skipped if the fix is obvious, and engineers will jump forward and make a PR. - [Confirmed crashes](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Acrash+label%3Abug+-label%3Aexplained+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+) should generally be considered high impact + [Confirmed 
crashes](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Acrash+label%3Abug+-label%3Abackend%2Fk8s+-label%3Aexplained+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+) should generally be considered high impact -### 4. The last step for [explained issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Aexplained+no%3Amilestone+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+) is to make a PR to fix them. +### 4. The last step for [explained issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Aexplained+no%3Amilestone+-label%3Abackend%2Fk8s+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+) is to make a PR to fix them. 
Explained issues that are expected to be fixed in a future release should be assigned to a milestone @@ -54,23 +54,23 @@ working as designed | confirmed as reported and closed because the behavior pending project | issue is confirmed but will require a significant project to fix ## Lack of response and unreproducible issues -When bugs that have been [labeled waiting response](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3A%22waiting+for+reproduction%22+label%3Awaiting-response+-label%3Aexplained+sort%3Aupdated-asc) or [labeled "waiting for reproduction"](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+label%3A%22waiting+for+reproduction%22+-label%3Aexplained+sort%3Aupdated-asc+) for more than 30 days, we'll use our best judgement to determine whether it's more helpful to close it or prompt the reporter again. If they again go without a response for 30 days, they can be closed with a polite message explaining why and inviting the person to submit the needed information or reproduction case in the future. 
+When bugs that have been [labeled waiting response](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fk8s+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3A%22waiting+for+reproduction%22+label%3Awaiting-response+-label%3Aexplained+sort%3Aupdated-asc+) or [labeled "waiting for reproduction"](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+label%3A%22waiting+for+reproduction%22+-label%3Aexplained+sort%3Aupdated-asc+) for more than 30 days, we'll use our best judgement to determine whether it's more helpful to close it or prompt the reporter again. If they again go without a response for 30 days, they can be closed with a polite message explaining why and inviting the person to submit the needed information or reproduction case in the future. The intent of this process is to get fix the maximum number of bugs in Terraform as quickly as possible, and having un-actionable bug reports makes it harder for Terraform Core team members and community contributors to find bugs they can actually work on. ## Helpful GitHub Filters ### Triage Process -1. 
[Newly created issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Anew+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3A%22waiting+for+reproduction%22+-label%3A%22waiting-response%22+-label%3Aexplained) require initial filtering. -2. Clarify [unreproduced issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+created%3A%3E2020-05-01+-label%3Aprovisioner%2Fsalt-masterless+-label%3Adocumentation+-label%3Aprovider%2Fazuredevops+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3Anew+-label%3A%22waiting+for+reproduction%22+-label%3Awaiting-response+-label%3Aexplained+sort%3Acreated-asc) -3. Explain or fix [confirmed issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Aexplained+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+). 
Prioritize [confirmed crashes](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Acrash+label%3Abug+-label%3Aexplained+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+). -4. Fix [explained issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Aexplained+no%3Amilestone+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+) +1. [Newly created issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Anew+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fk8s+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3A%22waiting+for+reproduction%22+-label%3A%22waiting-response%22+-label%3Aexplained+) require initial filtering. +2. 
Clarify [unreproduced issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+created%3A%3E2020-05-01+-label%3Abackend%2Fk8s+-label%3Aprovisioner%2Fsalt-masterless+-label%3Adocumentation+-label%3Aprovider%2Fazuredevops+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3Anew+-label%3A%22waiting+for+reproduction%22+-label%3Awaiting-response+-label%3Aexplained+sort%3Acreated-asc+) +3. Explain or fix [confirmed issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Aexplained+-label%3Abackend%2Fk8s+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+). Prioritize [confirmed crashes](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Acrash+label%3Abug+-label%3Aexplained+-label%3Abackend%2Fk8s+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+). +4. 
Fix [explained issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Aexplained+no%3Amilestone+-label%3Abackend%2Fk8s+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+) ### Other Backlog -[Confirmed needs for documentation fixes](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Adocumentation++label%3Aconfirmed+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+) +[Confirmed needs for documentation fixes](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Adocumentation++label%3Aconfirmed+-label%3Abackend%2Fk8s+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+) -[Confirmed bugs that will require significant projects to 
fix](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Aconfirmed+label%3A%22pending+project%22++-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2) +[Confirmed bugs that will require significant projects to fix](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Aconfirmed+label%3A%22pending+project%22+-label%3Abackend%2Fk8s+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+) ### Milestone Use diff --git a/CHANGELOG.md b/CHANGELOG.md index 0d12b1a43..4e1f7796e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,17 +1,52 @@ ## 0.15.0 (Unreleased) +BREAKING CHANGES: + +* Empty provider configuration blocks should be removed from modules. If a configuration alias is required within the module, it can be defined using the `configuration_aliases` argument within `required_providers`. Existing module configurations which were accepted but could produce incorrect or undefined behavior may now return errors when loading the configuration. [GH-27739] +* The `list` and `map` functions, both of which were deprecated since Terraform v0.12, are now removed. You can replace uses of these functions with `tolist([...])` and `tomap({...})` respectively. 
([#26818](https://github.com/hashicorp/terraform/issues/26818)) +* Terraform now requires UTF-8 character encoding and virtual terminal support when running on Windows. This unifies Terraform's terminal handling on Windows with that of other platforms, as per [Microsoft recommendations](https://docs.microsoft.com/en-us/windows/console/classic-vs-vt). Terraform previously required these terminal features on all other platforms, and now requires them on Windows too. + + UTF-8 and virtual terminal support were introduced across various Windows 10 updates, and so Terraform is no longer officially supported on the original release of Windows 10 or on Windows 8 and earlier. However, there are currently no technical measures to artificially _prevent_ Terraform from running on these obsolete Windows releases, and so you _may_ still be able to use Terraform v0.15 on older Windows versions if you either disable formatting (using the `-no-color`) option, or if you use a third-party terminal emulator package such as [ConEmu](https://conemu.github.io/), [Cmder](https://cmder.net/), or [mintty](https://mintty.github.io/). + + We strongly encourage planning to migrate to a newer version of Windows rather than relying on these workarounds for the long term, because the Terraform team will test future releases only on up-to-date Windows 10 and can therefore not guarantee ongoing support for older versions. + +* Interrupting execution will now cause terraform to exit with a non-zero exit status. ([#26738](https://github.com/hashicorp/terraform/issues/26738)) +* The trailing `[DIR]` argument to specify the working directory for various commands is no longer supported. Use the global `-chdir` option instead. ([#27664](https://github.com/hashicorp/terraform/pull/27664)) + + For example, instead of `terraform init infra`, write `terraform -chdir=infra init`. 
+* The `-lock` and `-lock-timeout` options are no longer available on `terraform init` ([#27464](https://github.com/hashicorp/terraform/issues/27464)) +* The `-verify-plugins=false` option is no longer available on `terraform init`. (Terraform now _always_ verifies plugins.) ([#27461](https://github.com/hashicorp/terraform/issues/27461)) +* The `-get-plugins=false` option is no longer available on `terraform init`. (Terraform now _always_ installs plugins.) ([#27463](https://github.com/hashicorp/terraform/issues/27463)) +* The `-force` option is no longer available on `terraform destroy`. Use `-auto-approve` instead ([#27681](https://github.com/hashicorp/terraform/pull/27681)) +* `terraform version -json` output no longer includes the (previously-unpopulated) "revision" property [[#27484](https://github.com/hashicorp/terraform/issues/27484)] +* The `atlas` backend, which was deprecated since Terraform v0.12, is now removed. ([#26651](https://github.com/hashicorp/terraform/issues/26651)) +* In the `gcs` backend the `path` config argument, which was deprecated since Terraform v0.11, is now removed. Use the `prefix` argument instead. ([#26841](https://github.com/hashicorp/terraform/issues/26841)) + ENHANCEMENTS: -* cli: Improved support for Windows console UI on Windows 10, including bold colors and underline for HCL diagnostics. [GH-26588] -* cli: Small reorganization and tidier formatting for the main help text printed by `terraform` with no subcommands. [GH-26695] -* cli: Removed the `terraform debug` container command, which has not had any subcommands under it for a long time. [GH-26695] +* config: A `required_providers` entry can now contain `configuration_aliases` to declare additional configuration aliases names without requiring a configuration block [GH-27739] +* config: Terraform will now emit a warning if you declare a `backend` block in a non-root module. Terraform has always ignored such declarations, but previously did so silently. 
This is a warning rather than an error only because it is sometimes convenient to temporarily use a root module as if it were a child module in order to test or debug its behavior separately from its main backend. ([#26954](https://github.com/hashicorp/terraform/issues/26954)) +* cli: The family of error messages with the summary "Invalid for_each argument" will now include some additional context about which external values contributed to the result. ([#26747](https://github.com/hashicorp/terraform/issues/26747)) +* cli: Terraform now uses UTF-8 and full VT mode even when running on Windows. Previously Terraform was using the "classic" Windows console API, which was far more limited in what formatting sequences it supported and which characters it could render. ([#27487](https://github.com/hashicorp/terraform/issues/27487)) +* cli: Improved support for Windows console UI on Windows 10, including bold colors and underline for HCL diagnostics. ([#26588](https://github.com/hashicorp/terraform/issues/26588)) +* cli: Diagnostic messages now have a vertical line along their left margin, which we hope will achieve a better visual hierarchy for sighted users and thus make it easier to see where the errors and warnings start and end in relation to other content that might be printed alongside. ([#27343](https://github.com/hashicorp/terraform/issues/27343)) +* cli: Typing an invalid top-level command, like `terraform destory` instead of `destroy`, will now print out a specific error message about the command being invalid, rather than just printing out the usual help directory. 
([#26967](https://github.com/hashicorp/terraform/issues/26967)) +* cli: Plugin crashes will now be reported with more detail, pointing out the plugin name and the method call along with the stack trace ([#26694](https://github.com/hashicorp/terraform/issues/26694)) +* provisioner/remote-exec: Can now run in a mode that expects the remote system to be running Windows and executing commands using the Windows command interpreter, rather than a Unix-style shell. Specify the `target_platform` as `"windows"` in the `connection` block. ([#26865](https://github.com/hashicorp/terraform/issues/26865)) BUG FIXES: -* cli: Exit with an error if unable to gather input from the UI. For example, this may happen when running in a non-interactive environment but without `-input=false`. Previously Terraform would interpret these errors as empty strings, which could be confusing. [GH-26509] - -BREAKING CHANGES: -* backend/atlas: the `atlas` backend, which was deprecated in v0.12, has been removed. [GH-26651] +* cli: Exit with an error if unable to gather input from the UI. For example, this may happen when running in a non-interactive environment but without `-input=false`. Previously Terraform would interpret these errors as empty strings, which could be confusing. 
([#26509](https://github.com/hashicorp/terraform/issues/26509)) +* cli: TF_LOG levels other than `trace` will now work correctly ([#26632](https://github.com/hashicorp/terraform/issues/26632)) +* cli: Core and Provider logs can now be enabled separately for debugging, using `TF_LOG_CORE` and `TF_LOG_PROVIDER` ([#26685](https://github.com/hashicorp/terraform/issues/26685)) +* command/console: expressions using `path` (`path.root`, `path.module`) now return the same result as they would in a configuration ([#27263](https://github.com/hashicorp/terraform/issues/27263)) +* command/show: fix issue with child_modules not properly displaying in certain circumstances ([#27352](https://github.com/hashicorp/terraform/issues/27352)) +* command/state list: fix bug where nested modules' resources were missing from `state list` output ([#27268](https://github.com/hashicorp/terraform/issues/27268)) +* command/state mv: fix display names in errors and improve error when failing to target a whole resource ([#27482](https://github.com/hashicorp/terraform/issues/27482)) +* command/taint: show resource name in -allow-missing warning ([#27501](https://github.com/hashicorp/terraform/issues/27501)) +* command/untaint: show resource name in -allow-missing warning ([#27502](https://github.com/hashicorp/terraform/issues/27502)) +* core: validate will now ignore providers without configuration ([#24896](https://github.com/hashicorp/terraform/issues/24896)) +* core: refresh data sources during destroy ([#27408](https://github.com/hashicorp/terraform/issues/27408)) ## Previous Releases diff --git a/Dockerfile b/Dockerfile index 58f8f752a..1e1bb9760 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,7 +7,7 @@ # the officially-released binary from releases.hashicorp.com and are # built by the (closed-source) official release process. 
-FROM golang:alpine +FROM docker.mirror.hashicorp.services/golang:alpine LABEL maintainer="HashiCorp Terraform Team " RUN apk add --no-cache git bash openssh diff --git a/Makefile b/Makefile index 0324eca9c..d26ee96ea 100644 --- a/Makefile +++ b/Makefile @@ -6,12 +6,6 @@ VERSION?="0.3.44" # "make protobuf". generate: go generate ./... - # go fmt doesn't support -mod=vendor but it still wants to populate the - # module cache with everything in go.mod even though formatting requires - # no dependencies, and so we're disabling modules mode for this right - # now until the "go fmt" behavior is rationalized to either support the - # -mod= argument or _not_ try to install things. - GO111MODULE=off go fmt command/internal_plugin_list.go > /dev/null # We separate the protobuf generation because most development tasks on # Terraform do not involve changing protobuf files and protoc is not a @@ -48,29 +42,6 @@ endif --workdir /terraform-website \ hashicorp/middleman-hashicorp:${VERSION} -website-test: -ifeq (,$(wildcard $(GOPATH)/src/$(WEBSITE_REPO))) - echo "$(WEBSITE_REPO) not found in your GOPATH (necessary for layouts and assets), get-ting..." - git clone https://$(WEBSITE_REPO) $(GOPATH)/src/$(WEBSITE_REPO) -endif - $(eval WEBSITE_PATH := $(GOPATH)/src/$(WEBSITE_REPO)) - @echo "==> Testing core website in Docker..." 
- -@docker stop "tf-website-core-temp" - @docker run \ - --detach \ - --rm \ - --name "tf-website-core-temp" \ - --publish "4567:4567" \ - --volume "$(shell pwd)/website:/website" \ - --volume "$(shell pwd):/ext/terraform" \ - --volume "$(WEBSITE_PATH)/content:/terraform-website" \ - --volume "$(WEBSITE_PATH)/content/source/assets:/website/docs/assets" \ - --volume "$(WEBSITE_PATH)/content/source/layouts:/website/docs/layouts" \ - --workdir /terraform-website \ - hashicorp/middleman-hashicorp:${VERSION} - $(WEBSITE_PATH)/content/scripts/check-links.sh "http://127.0.0.1:4567" "/" "/docs/providers/*" - @docker stop "tf-website-core-temp" - # disallow any parallelism (-j) for Make. This is necessary since some # commands during the build process create temporary files that collide # under parallel conditions. diff --git a/README.md b/README.md index 900577641..c14a3fef4 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ Show off your Terraform knowledge by passing a certification exam. Visit the [ce Developing Terraform -------------------- -This repository contains only Terraform core, which includes the command line interface and the main graph engine. Providers are implemented as plugins that each have their own repository in [the `terraform-providers` organization](https://github.com/terraform-providers) on GitHub. Instructions for developing each provider are in the associated README file. For more information, see [the provider development overview](https://www.terraform.io/docs/plugins/provider.html). +This repository contains only Terraform core, which includes the command line interface and the main graph engine. Providers are implemented as plugins, and Terraform can automatically download providers that are published on [the Terraform Registry](https://registry.terraform.io). HashiCorp develops some providers, and others are developed by other organizations. 
For more information, see [Extending Terraform](https://www.terraform.io/docs/extend/index.html). To learn more about compiling Terraform and contributing suggested changes, please refer to [the contributing guide](.github/CONTRIBUTING.md). diff --git a/addrs/module_instance.go b/addrs/module_instance.go index 75c69254a..f3efa7eaf 100644 --- a/addrs/module_instance.go +++ b/addrs/module_instance.go @@ -82,6 +82,7 @@ func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Tra var mi ModuleInstance var diags tfdiags.Diagnostics +LOOP: for len(remain) > 0 { var next string switch tt := remain[0].(type) { @@ -96,7 +97,7 @@ func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Tra Detail: "Module address prefix must be followed by dot and then a name.", Subject: remain[0].SourceRange().Ptr(), }) - break + break LOOP } if next != "module" { @@ -129,7 +130,7 @@ func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Tra Detail: "Prefix \"module.\" must be followed by a module name.", Subject: remain[0].SourceRange().Ptr(), }) - break + break LOOP } remain = remain[1:] step := ModuleInstanceStep{ diff --git a/backend/backend.go b/backend/backend.go index 0394ae291..b929aa393 100644 --- a/backend/backend.go +++ b/backend/backend.go @@ -188,12 +188,11 @@ type Operation struct { // The options below are more self-explanatory and affect the runtime // behavior of the operation. - AutoApprove bool - Destroy bool - DestroyForce bool - Parallelism int - Targets []addrs.Targetable - Variables map[string]UnparsedVariableValue + AutoApprove bool + Destroy bool + Parallelism int + Targets []addrs.Targetable + Variables map[string]UnparsedVariableValue // Some operations use root module variables only opportunistically or // don't need them at all. 
If this flag is set, the backend must treat diff --git a/backend/cli.go b/backend/cli.go index cd29e3862..80313c39c 100644 --- a/backend/cli.go +++ b/backend/cli.go @@ -1,9 +1,11 @@ package backend import ( - "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/cli" "github.com/mitchellh/colorstring" + + "github.com/hashicorp/terraform/internal/terminal" + "github.com/hashicorp/terraform/terraform" ) // CLI is an optional interface that can be implemented to be initialized @@ -48,6 +50,12 @@ type CLIOpts struct { CLI cli.Ui CLIColor *colorstring.Colorize + // Streams describes the low-level streams for Stdout, Stderr and Stdin, + // including some metadata about whether they are terminals. Most output + // should go via the object in field CLI above, but Streams can be useful + // for tailoring the output to fit the attached terminal, for example. + Streams *terminal.Streams + // ShowDiagnostics is a function that will format and print diagnostic // messages to the UI. ShowDiagnostics func(vals ...interface{}) diff --git a/backend/local/backend.go b/backend/local/backend.go index 866c4899a..8acbb0054 100644 --- a/backend/local/backend.go +++ b/backend/local/backend.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/command/clistate" "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/internal/terminal" "github.com/hashicorp/terraform/states/statemgr" "github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/tfdiags" @@ -38,6 +39,10 @@ type Local struct { CLI cli.Ui CLIColor *colorstring.Colorize + // If CLI is set then Streams might also be set, to describe the physical + // input/output handles that CLI is connected to. + Streams *terminal.Streams + // ShowDiagnostics prints diagnostic messages to the UI. 
ShowDiagnostics func(vals ...interface{}) diff --git a/backend/local/backend_apply.go b/backend/local/backend_apply.go index 7e177eac8..cd018269a 100644 --- a/backend/local/backend_apply.go +++ b/backend/local/backend_apply.go @@ -39,15 +39,13 @@ func (b *Local) opApply( return } - // Setup our count hook that keeps track of resource changes - countHook := new(CountHook) stateHook := new(StateHook) if b.ContextOpts == nil { b.ContextOpts = new(terraform.ContextOpts) } old := b.ContextOpts.Hooks defer func() { b.ContextOpts.Hooks = old }() - b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook, stateHook) + b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, stateHook) // Get our context tfCtx, _, opState, contextDiags := b.context(op) @@ -81,7 +79,7 @@ func (b *Local) opApply( trivialPlan := plan.Changes.Empty() hasUI := op.UIOut != nil && op.UIIn != nil - mustConfirm := hasUI && ((op.Destroy && (!op.DestroyForce && !op.AutoApprove)) || (!op.Destroy && !op.AutoApprove && !trivialPlan)) + mustConfirm := hasUI && !op.AutoApprove && !trivialPlan if mustConfirm { var desc, query string if op.Destroy { @@ -137,7 +135,7 @@ func (b *Local) opApply( } } - // Setup our hook for continuous state updates + // Set up our hook for continuous state updates stateHook.StateMgr = opState // Start the apply in a goroutine so that we can be interrupted. @@ -183,35 +181,6 @@ func (b *Local) opApply( // here just before we show the summary and next steps. If we encountered // errors then we would've returned early at some other point above. b.ShowDiagnostics(diags) - - // If we have a UI, output the results - if b.CLI != nil { - if op.Destroy { - b.CLI.Output(b.Colorize().Color(fmt.Sprintf( - "[reset][bold][green]\n"+ - "Destroy complete! Resources: %d destroyed.", - countHook.Removed))) - } else { - b.CLI.Output(b.Colorize().Color(fmt.Sprintf( - "[reset][bold][green]\n"+ - "Apply complete! 
Resources: %d added, %d changed, %d destroyed.", - countHook.Added, - countHook.Changed, - countHook.Removed))) - } - - // only show the state file help message if the state is local. - if (countHook.Added > 0 || countHook.Changed > 0) && b.StateOutPath != "" { - b.CLI.Output(b.Colorize().Color(fmt.Sprintf( - "[reset]\n"+ - "The state of your infrastructure has been saved to the path\n"+ - "below. This state is required to modify and destroy your\n"+ - "infrastructure, so keep it safe. To inspect the complete state\n"+ - "use the `terraform show` command.\n\n"+ - "State path: %s", - b.StateOutPath))) - } - } } // backupStateForError is called in a scenario where we're unable to persist the diff --git a/backend/local/backend_apply_test.go b/backend/local/backend_apply_test.go index 73c384d42..106853e7c 100644 --- a/backend/local/backend_apply_test.go +++ b/backend/local/backend_apply_test.go @@ -27,7 +27,7 @@ func TestLocal_applyBasic(t *testing.T) { defer cleanup() p := TestLocalProvider(t, b, "test", applyFixtureSchema()) - p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yes"), "ami": cty.StringVal("bar"), })} @@ -70,7 +70,7 @@ func TestLocal_applyEmptyDir(t *testing.T) { defer cleanup() p := TestLocalProvider(t, b, "test", &terraform.ProviderSchema{}) - p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("yes")})} + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("yes")})} op, configCleanup := testOperationApply(t, "./testdata/empty") defer configCleanup() @@ -101,7 +101,7 @@ func TestLocal_applyEmptyDirDestroy(t *testing.T) { defer cleanup() p := TestLocalProvider(t, b, "test", 
&terraform.ProviderSchema{}) - p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{} + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{} op, configCleanup := testOperationApply(t, "./testdata/empty") defer configCleanup() @@ -193,7 +193,7 @@ func TestLocal_applyBackendFail(t *testing.T) { defer cleanup() p := TestLocalProvider(t, b, "test", applyFixtureSchema()) - p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yes"), "ami": cty.StringVal("bar"), })} @@ -241,6 +241,30 @@ test_instance.foo: assertBackendStateUnlocked(t, b) } +func TestLocal_applyRefreshFalse(t *testing.T) { + b, cleanup := TestLocal(t) + defer cleanup() + + p := TestLocalProvider(t, b, "test", planFixtureSchema()) + testStateFile(t, b.StatePath, testPlanState()) + + op, configCleanup := testOperationApply(t, "./testdata/plan") + defer configCleanup() + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + + if p.ReadResourceCalled { + t.Fatal("ReadResource should not be called") + } +} + type backendWithFailingState struct { Local } diff --git a/backend/local/backend_local.go b/backend/local/backend_local.go index b4d6002c0..1b67939af 100644 --- a/backend/local/backend_local.go +++ b/backend/local/backend_local.go @@ -78,7 +78,7 @@ func (b *Local) context(op *backend.Operation) (*terraform.Context, *configload. 
opts.Targets = op.Targets opts.UIInput = op.UIIn - opts.SkipRefresh = op.Type == backend.OperationTypePlan && !op.PlanRefresh + opts.SkipRefresh = op.Type != backend.OperationTypeRefresh && !op.PlanRefresh if opts.SkipRefresh { log.Printf("[DEBUG] backend/local: skipping refresh of managed resources") } diff --git a/backend/local/backend_plan.go b/backend/local/backend_plan.go index 0fbb0b58a..3a45ec607 100644 --- a/backend/local/backend_plan.go +++ b/backend/local/backend_plan.go @@ -32,6 +32,8 @@ func (b *Local) opPlan( var diags tfdiags.Diagnostics + outputColumns := b.outputColumns() + if op.PlanFile != nil { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, @@ -57,14 +59,9 @@ func (b *Local) opPlan( return } - // Setup our count hook that keeps track of resource changes - countHook := new(CountHook) if b.ContextOpts == nil { b.ContextOpts = new(terraform.ContextOpts) } - old := b.ContextOpts.Hooks - defer func() { b.ContextOpts.Hooks = old }() - b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook) // Get our context tfCtx, configSnap, opState, ctxDiags := b.context(op) @@ -150,6 +147,7 @@ func (b *Local) opPlan( if runningOp.PlanEmpty { b.CLI.Output("\n" + b.Colorize().Color(strings.TrimSpace(planNoChanges))) + b.CLI.Output("\n" + strings.TrimSpace(format.WordWrap(planNoChangesDetail, outputColumns))) // Even if there are no changes, there still could be some warnings b.ShowDiagnostics(diags) return @@ -166,15 +164,15 @@ func (b *Local) opPlan( // tool which is presumed to provide its own UI for further actions. 
if !b.RunningInAutomation { - b.CLI.Output("\n------------------------------------------------------------------------") + b.outputHorizRule() if path := op.PlanOutPath; path == "" { b.CLI.Output(fmt.Sprintf( - "\n" + strings.TrimSpace(planHeaderNoOutput) + "\n", + "\n" + strings.TrimSpace(format.WordWrap(planHeaderNoOutput, outputColumns)) + "\n", )) } else { b.CLI.Output(fmt.Sprintf( - "\n"+strings.TrimSpace(planHeaderYesOutput)+"\n", + "\n"+strings.TrimSpace(format.WordWrap(planHeaderYesOutput, outputColumns))+"\n", path, path, )) } @@ -183,7 +181,7 @@ func (b *Local) opPlan( } func (b *Local) renderPlan(plan *plans.Plan, baseState *states.State, schemas *terraform.Schemas) { - RenderPlan(plan, baseState, schemas, b.CLI, b.Colorize()) + RenderPlan(plan, baseState, schemas, b.CLI, b.Colorize(), b.outputColumns()) } // RenderPlan renders the given plan to the given UI. @@ -206,7 +204,7 @@ func (b *Local) renderPlan(plan *plans.Plan, baseState *states.State, schemas *t // output values will not currently be rendered because their prior values // are currently stored only in the prior state. 
(see the docstring for // func planHasSideEffects for why this is and when that might change) -func RenderPlan(plan *plans.Plan, baseState *states.State, schemas *terraform.Schemas, ui cli.Ui, colorize *colorstring.Colorize) { +func RenderPlan(plan *plans.Plan, baseState *states.State, schemas *terraform.Schemas, ui cli.Ui, colorize *colorstring.Colorize, width int) { counts := map[plans.Action]int{} var rChanges []*plans.ResourceInstanceChangeSrc for _, change := range plan.Changes.Resources { @@ -220,7 +218,7 @@ func RenderPlan(plan *plans.Plan, baseState *states.State, schemas *terraform.Sc } headerBuf := &bytes.Buffer{} - fmt.Fprintf(headerBuf, "\n%s\n", strings.TrimSpace(planHeaderIntro)) + fmt.Fprintf(headerBuf, "\n%s\n", strings.TrimSpace(format.WordWrap(planHeaderIntro, width))) if counts[plans.Create] > 0 { fmt.Fprintf(headerBuf, "%s create\n", format.DiffActionSymbol(plans.Create)) } @@ -314,24 +312,31 @@ func RenderPlan(plan *plans.Plan, baseState *states.State, schemas *terraform.Sc // If there is at least one planned change to the root module outputs // then we'll render a summary of those too. - if len(plan.Changes.Outputs) > 0 { - ui.Output(colorize.Color("[reset]\n[bold]Changes to Outputs:[reset]" + format.OutputChanges(plan.Changes.Outputs, colorize))) + var changedRootModuleOutputs []*plans.OutputChangeSrc + for _, output := range plan.Changes.Outputs { + if !output.Addr.Module.IsRoot() { + continue + } + if output.ChangeSrc.Action == plans.NoOp { + continue + } + changedRootModuleOutputs = append(changedRootModuleOutputs, output) + } + if len(changedRootModuleOutputs) > 0 { + ui.Output(colorize.Color("[reset]\n[bold]Changes to Outputs:[reset]" + format.OutputChanges(changedRootModuleOutputs, colorize))) } } const planHeaderIntro = ` -An execution plan has been generated and is shown below. -Resource actions are indicated with the following symbols: +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: ` const planHeaderNoOutput = ` -Note: You didn't specify an "-out" parameter to save this plan, so Terraform -can't guarantee that exactly these actions will be performed if -"terraform apply" is subsequently run. +Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now. ` const planHeaderYesOutput = ` -This plan was saved to: %s +Saved the plan to: %s To perform exactly these actions, run the following command to apply: terraform apply %q @@ -339,14 +344,8 @@ To perform exactly these actions, run the following command to apply: const planNoChanges = ` [reset][bold][green]No changes. Infrastructure is up-to-date.[reset][green] - -This means that Terraform did not detect any differences between your -configuration and real physical resources that exist. As a result, no -actions need to be performed. ` -const planRefreshing = ` -[reset][bold]Refreshing Terraform state in-memory prior to plan...[reset] -The refreshed state will be used to calculate this plan, but will not be -persisted to local or remote state storage. +const planNoChangesDetail = ` +That means that Terraform did not detect any differences between your configuration and the remote system(s). As a result, there are no actions to take. ` diff --git a/backend/local/backend_plan_test.go b/backend/local/backend_plan_test.go index f275d20b1..4e0bc85a3 100644 --- a/backend/local/backend_plan_test.go +++ b/backend/local/backend_plan_test.go @@ -4,7 +4,6 @@ import ( "context" "os" "path/filepath" - "reflect" "strings" "testing" @@ -51,7 +50,7 @@ func TestLocal_planInAutomation(t *testing.T) { defer cleanup() TestLocalProvider(t, b, "test", planFixtureSchema()) - const msg = `You didn't specify an "-out" parameter` + const msg = `You didn't use the -out option` // When we're "in automation" we omit certain text from the // plan output. 
However, testing for the absense of text is @@ -77,7 +76,7 @@ func TestLocal_planInAutomation(t *testing.T) { output := b.CLI.(*cli.MockUi).OutputWriter.String() if !strings.Contains(output, msg) { - t.Fatalf("missing next-steps message when not in automation") + t.Fatalf("missing next-steps message when not in automation\nwant: %s\noutput:\n%s", msg, output) } } @@ -241,6 +240,56 @@ Changes to Outputs: } } +// Module outputs should not cause the plan to be rendered +func TestLocal_planModuleOutputsChanged(t *testing.T) { + b, cleanup := TestLocal(t) + defer cleanup() + testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) { + ss.SetOutputValue(addrs.AbsOutputValue{ + Module: addrs.RootModuleInstance.Child("mod", addrs.NoKey), + OutputValue: addrs.OutputValue{Name: "changed"}, + }, cty.StringVal("before"), false) + })) + b.CLI = cli.NewMockUi() + outDir := testTempDir(t) + defer os.RemoveAll(outDir) + planPath := filepath.Join(outDir, "plan.tfplan") + op, configCleanup := testOperationPlan(t, "./testdata/plan-module-outputs-changed") + defer configCleanup() + op.PlanRefresh = true + op.PlanOutPath = planPath + cfg := cty.ObjectVal(map[string]cty.Value{ + "path": cty.StringVal(b.StatePath), + }) + cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type()) + if err != nil { + t.Fatal(err) + } + op.PlanOutBackend = &plans.Backend{ + Type: "local", + Config: cfgRaw, + } + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("plan operation failed") + } + if !run.PlanEmpty { + t.Fatal("plan should be empty") + } + + expectedOutput := strings.TrimSpace(` +No changes. Infrastructure is up-to-date. 
+`) + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, expectedOutput) { + t.Fatalf("Unexpected output:\n%s\n\nwant output containing:\n%s", output, expectedOutput) + } +} + func TestLocal_planTainted(t *testing.T) { b, cleanup := TestLocal(t) defer cleanup() @@ -281,8 +330,8 @@ func TestLocal_planTainted(t *testing.T) { t.Fatal("plan should not be empty") } - expectedOutput := `An execution plan has been generated and is shown below. -Resource actions are indicated with the following symbols: + expectedOutput := `Terraform used the selected providers to generate the following execution +plan. Resource actions are indicated with the following symbols: -/+ destroy and then create replacement Terraform will perform the following actions: @@ -383,8 +432,8 @@ func TestLocal_planDeposedOnly(t *testing.T) { // it's also possible for there to be _multiple_ deposed objects, in the // unlikely event that create_before_destroy _keeps_ crashing across // subsequent runs. - expectedOutput := `An execution plan has been generated and is shown below. -Resource actions are indicated with the following symbols: + expectedOutput := `Terraform used the selected providers to generate the following execution +plan. Resource actions are indicated with the following symbols: + create - destroy @@ -457,8 +506,8 @@ func TestLocal_planTainted_createBeforeDestroy(t *testing.T) { t.Fatal("plan should not be empty") } - expectedOutput := `An execution plan has been generated and is shown below. -Resource actions are indicated with the following symbols: + expectedOutput := `Terraform used the selected providers to generate the following execution +plan. 
Resource actions are indicated with the following symbols: +/- create replacement and then destroy Terraform will perform the following actions: @@ -509,7 +558,7 @@ func TestLocal_planDestroy(t *testing.T) { b, cleanup := TestLocal(t) defer cleanup() - p := TestLocalProvider(t, b, "test", planFixtureSchema()) + TestLocalProvider(t, b, "test", planFixtureSchema()) testStateFile(t, b.StatePath, testPlanState()) outDir := testTempDir(t) @@ -543,10 +592,6 @@ func TestLocal_planDestroy(t *testing.T) { t.Fatalf("plan operation failed") } - if p.ReadResourceCalled { - t.Fatal("ReadResource should not be called") - } - if run.PlanEmpty { t.Fatal("plan should not be empty") } @@ -563,7 +608,7 @@ func TestLocal_planDestroy_withDataSources(t *testing.T) { b, cleanup := TestLocal(t) defer cleanup() - p := TestLocalProvider(t, b, "test", planFixtureSchema()) + TestLocalProvider(t, b, "test", planFixtureSchema()) testStateFile(t, b.StatePath, testPlanState_withDataSource()) b.CLI = cli.NewMockUi() @@ -599,14 +644,6 @@ func TestLocal_planDestroy_withDataSources(t *testing.T) { t.Fatalf("plan operation failed") } - if p.ReadResourceCalled { - t.Fatal("ReadResource should not be called") - } - - if p.ReadDataSourceCalled { - t.Fatal("ReadDataSourceCalled should not be called") - } - if run.PlanEmpty { t.Fatal("plan should not be empty") } @@ -640,7 +677,7 @@ Plan: 0 to add, 0 to change, 1 to destroy.` } func getAddrs(resources []*plans.ResourceInstanceChangeSrc) []string { - addrs := make([]string, len(resources), len(resources)) + addrs := make([]string, len(resources)) for i, r := range resources { addrs[i] = r.Addr.String() } @@ -690,49 +727,6 @@ func TestLocal_planOutPathNoChange(t *testing.T) { } } -// TestLocal_planScaleOutNoDupeCount tests a Refresh/Plan sequence when a -// resource count is scaled out. 
The scaled out node needs to exist in the -// graph and run through a plan-style sequence during the refresh phase, but -// can conflate the count if its post-diff count hooks are not skipped. This -// checks to make sure the correct resource count is ultimately given to the -// UI. -func TestLocal_planScaleOutNoDupeCount(t *testing.T) { - b, cleanup := TestLocal(t) - defer cleanup() - TestLocalProvider(t, b, "test", planFixtureSchema()) - testStateFile(t, b.StatePath, testPlanState()) - - actual := new(CountHook) - b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, actual) - - outDir := testTempDir(t) - defer os.RemoveAll(outDir) - - op, configCleanup := testOperationPlan(t, "./testdata/plan-scaleout") - defer configCleanup() - op.PlanRefresh = true - - run, err := b.Operation(context.Background(), op) - if err != nil { - t.Fatalf("bad: %s", err) - } - <-run.Done() - if run.Result != backend.OperationSuccess { - t.Fatalf("plan operation failed") - } - - expected := new(CountHook) - expected.ToAdd = 1 - expected.ToChange = 0 - expected.ToRemoveAndAdd = 0 - expected.ToRemove = 0 - - if !reflect.DeepEqual(expected, actual) { - t.Fatalf("Expected %#v, got %#v instead.", - expected, actual) - } -} - func testOperationPlan(t *testing.T, configDir string) (*backend.Operation, func()) { t.Helper() diff --git a/backend/local/backend_refresh_test.go b/backend/local/backend_refresh_test.go index cb6cb9b4f..6062ed0d6 100644 --- a/backend/local/backend_refresh_test.go +++ b/backend/local/backend_refresh_test.go @@ -24,7 +24,7 @@ func TestLocal_refresh(t *testing.T) { testStateFile(t, b.StatePath, testRefreshState()) p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yes"), })} @@ -76,7 +76,7 @@ func TestLocal_refreshInput(t *testing.T) { testStateFile(t, b.StatePath, 
testRefreshState()) p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yes"), })} p.ConfigureFn = func(req providers.ConfigureRequest) (resp providers.ConfigureResponse) { @@ -119,7 +119,7 @@ func TestLocal_refreshValidate(t *testing.T) { p := TestLocalProvider(t, b, "test", refreshFixtureSchema()) testStateFile(t, b.StatePath, testRefreshState()) p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yes"), })} @@ -135,6 +135,52 @@ func TestLocal_refreshValidate(t *testing.T) { } <-run.Done() + checkState(t, b.StateOutPath, ` +test_instance.foo: + ID = yes + provider = provider["registry.terraform.io/hashicorp/test"] + `) +} + +func TestLocal_refreshValidateProviderConfigured(t *testing.T) { + b, cleanup := TestLocal(t) + defer cleanup() + + schema := &terraform.ProviderSchema{ + Provider: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, + }, + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + }, + }, + } + + p := TestLocalProvider(t, b, "test", schema) + testStateFile(t, b.StatePath, testRefreshState()) + p.ReadResourceFn = nil + p.ReadResourceResponse = &providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + + // Enable validation + b.OpValidation = true + + op, configCleanup := testOperationRefresh(t, "./testdata/refresh-provider-config") + defer configCleanup() + + run, err 
:= b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("bad: %s", err) + } + <-run.Done() + if !p.PrepareProviderConfigCalled { t.Fatal("Prepare provider config should be called") } diff --git a/backend/local/cli.go b/backend/local/cli.go index c3d7a65ac..432ffc383 100644 --- a/backend/local/cli.go +++ b/backend/local/cli.go @@ -4,12 +4,14 @@ import ( "log" "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/format" ) // backend.CLI impl. func (b *Local) CLIInit(opts *backend.CLIOpts) error { b.CLI = opts.CLI b.CLIColor = opts.CLIColor + b.Streams = opts.Streams b.ShowDiagnostics = opts.ShowDiagnostics b.ContextOpts = opts.ContextOpts b.OpInput = opts.Input @@ -34,3 +36,45 @@ func (b *Local) CLIInit(opts *backend.CLIOpts) error { return nil } + +// outputColumns returns the number of text character cells any non-error +// output should be wrapped to. +// +// This is the number of columns to use if you are calling b.CLI.Output or +// b.CLI.Info. +func (b *Local) outputColumns() int { + if b.Streams == nil { + // We can potentially get here in tests, if they don't populate the + // CLIOpts fully. + return 78 // placeholder just so we don't panic + } + return b.Streams.Stdout.Columns() +} + +// errorColumns returns the number of text character cells any error +// output should be wrapped to. +// +// This is the number of columns to use if you are calling b.CLI.Error or +// b.CLI.Warn. +func (b *Local) errorColumns() int { + if b.Streams == nil { + // We can potentially get here in tests, if they don't populate the + // CLIOpts fully. + return 78 // placeholder just so we don't panic + } + return b.Streams.Stderr.Columns() +} + +// outputHorizRule will call b.CLI.Output with enough horizontal line +// characters to fill an entire row of output. +// +// This function does nothing if the backend doesn't have a CLI attached. 
+// +// If UI color is enabled, the rule will get a dark grey coloring to try to +// visually de-emphasize it. +func (b *Local) outputHorizRule() { + if b.CLI == nil { + return + } + b.CLI.Output(format.HorizontalRule(b.CLIColor, b.outputColumns())) +} diff --git a/backend/local/counthookaction_string.go b/backend/local/counthookaction_string.go deleted file mode 100644 index 591004749..000000000 --- a/backend/local/counthookaction_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type=countHookAction hook_count_action.go"; DO NOT EDIT. - -package local - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[countHookActionAdd-0] - _ = x[countHookActionChange-1] - _ = x[countHookActionRemove-2] -} - -const _countHookAction_name = "countHookActionAddcountHookActionChangecountHookActionRemove" - -var _countHookAction_index = [...]uint8{0, 18, 39, 60} - -func (i countHookAction) String() string { - if i >= countHookAction(len(_countHookAction_index)-1) { - return "countHookAction(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _countHookAction_name[_countHookAction_index[i]:_countHookAction_index[i+1]] -} diff --git a/backend/local/hook_count_action.go b/backend/local/hook_count_action.go deleted file mode 100644 index 9adcd9047..000000000 --- a/backend/local/hook_count_action.go +++ /dev/null @@ -1,11 +0,0 @@ -package local - -//go:generate go run golang.org/x/tools/cmd/stringer -type=countHookAction hook_count_action.go - -type countHookAction byte - -const ( - countHookActionAdd countHookAction = iota - countHookActionChange - countHookActionRemove -) diff --git a/backend/local/testdata/destroy-with-ds/main.tf b/backend/local/testdata/destroy-with-ds/main.tf index 4ee80ea3d..7062d896b 100644 --- a/backend/local/testdata/destroy-with-ds/main.tf +++ 
b/backend/local/testdata/destroy-with-ds/main.tf @@ -1,4 +1,5 @@ resource "test_instance" "foo" { + count = 1 ami = "bar" } diff --git a/backend/local/testdata/plan-module-outputs-changed/main.tf b/backend/local/testdata/plan-module-outputs-changed/main.tf new file mode 100644 index 000000000..ba8468469 --- /dev/null +++ b/backend/local/testdata/plan-module-outputs-changed/main.tf @@ -0,0 +1,3 @@ +module "mod" { + source = "./mod" +} diff --git a/backend/local/testdata/plan-module-outputs-changed/mod/main.tf b/backend/local/testdata/plan-module-outputs-changed/mod/main.tf new file mode 100644 index 000000000..cee14bd9c --- /dev/null +++ b/backend/local/testdata/plan-module-outputs-changed/mod/main.tf @@ -0,0 +1,3 @@ +output "changed" { + value = "after" +} diff --git a/backend/local/testdata/plan-outputs-changed/main.tf b/backend/local/testdata/plan-outputs-changed/main.tf index c1686a89e..1df236ff4 100644 --- a/backend/local/testdata/plan-outputs-changed/main.tf +++ b/backend/local/testdata/plan-outputs-changed/main.tf @@ -1,3 +1,7 @@ +module "submodule" { + source = "./submodule" +} + output "changed" { value = "after" } diff --git a/backend/local/testdata/plan-outputs-changed/submodule/main.tf b/backend/local/testdata/plan-outputs-changed/submodule/main.tf new file mode 100644 index 000000000..ae32f8aa1 --- /dev/null +++ b/backend/local/testdata/plan-outputs-changed/submodule/main.tf @@ -0,0 +1,3 @@ +output "foo" { + value = "bar" +} diff --git a/backend/local/testdata/plan-scaleout/main.tf b/backend/local/testdata/plan-scaleout/main.tf deleted file mode 100644 index 4fc97bafa..000000000 --- a/backend/local/testdata/plan-scaleout/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -resource "test_instance" "foo" { - count = 2 - ami = "bar" - - # This is here because at some point it caused a test failure - network_interface { - device_index = 0 - description = "Main network interface" - } -} diff --git a/backend/local/testdata/refresh-provider-config/main.tf 
b/backend/local/testdata/refresh-provider-config/main.tf new file mode 100644 index 000000000..f3a3ebb85 --- /dev/null +++ b/backend/local/testdata/refresh-provider-config/main.tf @@ -0,0 +1,7 @@ +resource "test_instance" "foo" { + ami = "bar" +} + +provider "test" { + value = "foo" +} diff --git a/backend/local/testing.go b/backend/local/testing.go index 0e6d426f1..ab2246454 100644 --- a/backend/local/testing.go +++ b/backend/local/testing.go @@ -72,7 +72,21 @@ func TestLocalProvider(t *testing.T, b *Local, name string, schema *terraform.Pr if schema == nil { schema = &terraform.ProviderSchema{} // default schema is empty } - p.GetSchemaReturn = schema + p.GetSchemaResponse = &providers.GetSchemaResponse{ + Provider: providers.Schema{Block: schema.Provider}, + ProviderMeta: providers.Schema{Block: schema.ProviderMeta}, + ResourceTypes: map[string]providers.Schema{}, + DataSources: map[string]providers.Schema{}, + } + for name, res := range schema.ResourceTypes { + p.GetSchemaResponse.ResourceTypes[name] = providers.Schema{ + Block: res, + Version: int64(schema.ResourceTypeSchemaVersions[name]), + } + } + for name, dat := range schema.DataSources { + p.GetSchemaResponse.DataSources[name] = providers.Schema{Block: dat} + } p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { rSchema, _ := schema.SchemaForResourceType(addrs.ManagedResourceMode, req.TypeName) @@ -111,7 +125,7 @@ func TestLocalProvider(t *testing.T, b *Local, name string, schema *terraform.Pr b.ContextOpts = &terraform.ContextOpts{} } - // Setup our provider + // Set up our provider b.ContextOpts.Providers = map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider(name): providers.FactoryFixed(p), } diff --git a/backend/remote-state/artifactory/backend.go b/backend/remote-state/artifactory/backend.go index 2062968af..8f504a610 100644 --- a/backend/remote-state/artifactory/backend.go +++ b/backend/remote-state/artifactory/backend.go @@ -5,7 
+5,7 @@ import ( cleanhttp "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/internal/legacy/helper/schema" "github.com/hashicorp/terraform/states/remote" "github.com/hashicorp/terraform/states/statemgr" artifactory "github.com/lusis/go-artifactory/src/artifactory.v401" diff --git a/backend/remote-state/azure/backend.go b/backend/remote-state/azure/backend.go index 00995d97a..7e899ad9d 100644 --- a/backend/remote-state/azure/backend.go +++ b/backend/remote-state/azure/backend.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/internal/legacy/helper/schema" ) // New creates a new backend for Azure remote state. diff --git a/backend/remote-state/azure/backend_test.go b/backend/remote-state/azure/backend_test.go index 3f0a28534..9d9d2d3ae 100644 --- a/backend/remote-state/azure/backend_test.go +++ b/backend/remote-state/azure/backend_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/internal/legacy/helper/acctest" ) func TestBackend_impl(t *testing.T) { diff --git a/backend/remote-state/azure/client_test.go b/backend/remote-state/azure/client_test.go index 45af094aa..c254e9ca1 100644 --- a/backend/remote-state/azure/client_test.go +++ b/backend/remote-state/azure/client_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/internal/legacy/helper/acctest" "github.com/hashicorp/terraform/states/remote" "github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs" ) diff --git a/backend/remote-state/consul/backend.go b/backend/remote-state/consul/backend.go index 271a60b63..ebe62471b 100644 --- 
a/backend/remote-state/consul/backend.go +++ b/backend/remote-state/consul/backend.go @@ -8,7 +8,7 @@ import ( consulapi "github.com/hashicorp/consul/api" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/internal/legacy/helper/schema" ) // New creates a new backend for Consul remote state. diff --git a/backend/remote-state/cos/backend.go b/backend/remote-state/cos/backend.go index ce502e5cc..fa358aa10 100644 --- a/backend/remote-state/cos/backend.go +++ b/backend/remote-state/cos/backend.go @@ -9,7 +9,7 @@ import ( "time" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/internal/legacy/helper/schema" "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common" "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile" tag "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tag/v20180813" diff --git a/backend/remote-state/etcdv2/backend.go b/backend/remote-state/etcdv2/backend.go index 9f9fa0904..ee0f0bda5 100644 --- a/backend/remote-state/etcdv2/backend.go +++ b/backend/remote-state/etcdv2/backend.go @@ -8,7 +8,7 @@ import ( etcdapi "github.com/coreos/etcd/client" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/internal/legacy/helper/schema" "github.com/hashicorp/terraform/states/remote" "github.com/hashicorp/terraform/states/statemgr" ) diff --git a/backend/remote-state/etcdv3/backend.go b/backend/remote-state/etcdv3/backend.go index fb3f5e202..1bf5809bf 100644 --- a/backend/remote-state/etcdv3/backend.go +++ b/backend/remote-state/etcdv3/backend.go @@ -6,7 +6,7 @@ import ( etcdv3 "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/pkg/transport" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/internal/legacy/helper/schema" ) const ( 
diff --git a/backend/remote-state/gcs/backend.go b/backend/remote-state/gcs/backend.go index 1ad3078b9..8184dafbd 100644 --- a/backend/remote-state/gcs/backend.go +++ b/backend/remote-state/gcs/backend.go @@ -11,8 +11,8 @@ import ( "cloud.google.com/go/storage" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/internal/legacy/helper/schema" "golang.org/x/oauth2" "golang.org/x/oauth2/jwt" "google.golang.org/api/option" @@ -27,9 +27,8 @@ type Backend struct { storageClient *storage.Client storageContext context.Context - bucketName string - prefix string - defaultStateFile string + bucketName string + prefix string encryptionKey []byte } @@ -45,13 +44,6 @@ func New() backend.Backend { Description: "The name of the Google Cloud Storage bucket", }, - "path": { - Type: schema.TypeString, - Optional: true, - Description: "Path of the default state file", - Deprecated: "Use the \"prefix\" option instead", - }, - "prefix": { Type: schema.TypeString, Optional: true, @@ -74,6 +66,22 @@ func New() backend.Backend { Description: "An OAuth2 token used for GCP authentication", }, + "impersonate_service_account": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT", + }, nil), + Description: "The service account to impersonate for all Google API Calls", + }, + + "impersonate_service_account_delegates": { + Type: schema.TypeList, + Optional: true, + Description: "The delegation chain for the impersonated service account", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "encryption_key": { Type: schema.TypeString, Optional: true, @@ -121,8 +129,6 @@ func (b *Backend) configure(ctx context.Context) error { b.prefix = b.prefix + "/" } - b.defaultStateFile = strings.TrimLeft(data.Get("path").(string), "/") - var opts []option.ClientOption // Add credential source @@ -168,6 
+174,24 @@ func (b *Backend) configure(ctx context.Context) error { opts = append(opts, option.WithScopes(storage.ScopeReadWrite)) } + // Service Account Impersonation + if v, ok := data.GetOk("impersonate_service_account"); ok { + ServiceAccount := v.(string) + opts = append(opts, option.ImpersonateCredentials(ServiceAccount)) + + if v, ok := data.GetOk("impersonate_service_account_delegates"); ok { + var delegates []string + d := v.([]interface{}) + if len(delegates) > 0 { + delegates = make([]string, len(d)) + } + for _, delegate := range d { + delegates = append(delegates, delegate.(string)) + } + opts = append(opts, option.ImpersonateCredentials(ServiceAccount, delegates...)) + } + } + opts = append(opts, option.WithUserAgent(httpclient.UserAgentString())) client, err := storage.NewClient(b.storageContext, opts...) if err != nil { diff --git a/backend/remote-state/gcs/backend_state.go b/backend/remote-state/gcs/backend_state.go index d4916190f..a7e511cd2 100644 --- a/backend/remote-state/gcs/backend_state.go +++ b/backend/remote-state/gcs/backend_state.go @@ -146,15 +146,9 @@ func (b *Backend) StateMgr(name string) (statemgr.Full, error) { } func (b *Backend) stateFile(name string) string { - if name == backend.DefaultStateName && b.defaultStateFile != "" { - return b.defaultStateFile - } return path.Join(b.prefix, name+stateFileSuffix) } func (b *Backend) lockFile(name string) string { - if name == backend.DefaultStateName && b.defaultStateFile != "" { - return strings.TrimSuffix(b.defaultStateFile, stateFileSuffix) + lockFileSuffix - } return path.Join(b.prefix, name+lockFileSuffix) } diff --git a/backend/remote-state/gcs/backend_test.go b/backend/remote-state/gcs/backend_test.go index 6d71cb341..dd089aeb4 100644 --- a/backend/remote-state/gcs/backend_test.go +++ b/backend/remote-state/gcs/backend_test.go @@ -25,23 +25,19 @@ func TestStateFile(t *testing.T) { t.Parallel() cases := []struct { - prefix string - defaultStateFile string - name string - 
wantStateFile string - wantLockFile string + prefix string + name string + wantStateFile string + wantLockFile string }{ - {"state", "", "default", "state/default.tfstate", "state/default.tflock"}, - {"state", "", "test", "state/test.tfstate", "state/test.tflock"}, - {"state", "legacy.tfstate", "default", "legacy.tfstate", "legacy.tflock"}, - {"state", "legacy.tfstate", "test", "state/test.tfstate", "state/test.tflock"}, - {"state", "legacy.state", "default", "legacy.state", "legacy.state.tflock"}, - {"state", "legacy.state", "test", "state/test.tfstate", "state/test.tflock"}, + {"state", "default", "state/default.tfstate", "state/default.tflock"}, + {"state", "test", "state/test.tfstate", "state/test.tflock"}, + {"state", "test", "state/test.tfstate", "state/test.tflock"}, + {"state", "test", "state/test.tfstate", "state/test.tflock"}, } for _, c := range cases { b := &Backend{ - prefix: c.prefix, - defaultStateFile: c.defaultStateFile, + prefix: c.prefix, } if got := b.stateFile(c.name); got != c.wantStateFile { diff --git a/backend/remote-state/http/backend.go b/backend/remote-state/http/backend.go index 12076e01a..dee59f48c 100644 --- a/backend/remote-state/http/backend.go +++ b/backend/remote-state/http/backend.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-retryablehttp" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/internal/legacy/helper/schema" "github.com/hashicorp/terraform/states/remote" "github.com/hashicorp/terraform/states/statemgr" ) diff --git a/backend/remote-state/http/client.go b/backend/remote-state/http/client.go index 9dd91ff24..4c299521f 100644 --- a/backend/remote-state/http/client.go +++ b/backend/remote-state/http/client.go @@ -49,7 +49,7 @@ func (c *httpClient) httpRequest(method string, url *url.URL, data *[]byte, what if err != nil { return nil, fmt.Errorf("Failed to make %s HTTP request: %s", what, err) } - // Setup 
basic auth + // Set up basic auth if c.Username != "" { req.SetBasicAuth(c.Username, c.Password) } diff --git a/backend/remote-state/inmem/backend.go b/backend/remote-state/inmem/backend.go index 1a974a05b..035f3c973 100644 --- a/backend/remote-state/inmem/backend.go +++ b/backend/remote-state/inmem/backend.go @@ -9,7 +9,7 @@ import ( "time" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/internal/legacy/helper/schema" statespkg "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/states/remote" "github.com/hashicorp/terraform/states/statemgr" diff --git a/backend/remote-state/kubernetes/backend.go b/backend/remote-state/kubernetes/backend.go index eed598f8e..12530b0bf 100644 --- a/backend/remote-state/kubernetes/backend.go +++ b/backend/remote-state/kubernetes/backend.go @@ -8,7 +8,7 @@ import ( "os" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/internal/legacy/helper/schema" "github.com/hashicorp/terraform/version" "github.com/mitchellh/cli" "github.com/mitchellh/go-homedir" diff --git a/backend/remote-state/kubernetes/log.txt b/backend/remote-state/kubernetes/log.txt deleted file mode 100644 index 38cae58d0..000000000 --- a/backend/remote-state/kubernetes/log.txt +++ /dev/null @@ -1,1995 +0,0 @@ -=== RUN TestBackendLocksSoak - TestBackendLocksSoak: backend_test.go:122: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on 
*kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with 
configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with 
configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with 
configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with 
configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default 
context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 
12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with 
configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on 
*kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on 
*kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on 
*kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully 
loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on 
*kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend 
with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - 
TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - 
TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] 
Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - 
TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] 
Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully 
loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: 
TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on 
*kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: 
backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with 
configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; 
default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with 
configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: 
backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: 
TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with 
configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: 
backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on 
*kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with 
configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on 
*kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with 
configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on 
*kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with 
configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded 
config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with 
configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] 
Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded 
config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on 
*kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with 
configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on 
*kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - 
TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: 
TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with 
configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded 
config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: 
backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on 
*kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - 
TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) 
-2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 
[INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully 
loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on 
*kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - 
TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] 
Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded 
config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file 
(/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; 
default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:94: 
Error creating state manager: the state is already locked by another terraform client - Lock Info: - ID: 3e2169be-df88-a073-df20-d5d9863abd5d - Path: - Operation: init - Who: john@Johns-MacBook-Pro.local - Version: 0.13.0 - Created: 2020-06-05 16:54:18.347493 +0000 UTC - Info: - TestBackendLocksSoak: backend_test.go:94: Error creating state manager: the state is already locked by another terraform client - Lock Info: - ID: 3e2169be-df88-a073-df20-d5d9863abd5d - Path: - Operation: init - Who: john@Johns-MacBook-Pro.local - Version: 0.13.0 - Created: 2020-06-05 16:54:18.347493 +0000 UTC - Info: - TestBackendLocksSoak: backend_test.go:94: Error creating state manager: the state is already locked by another terraform client - Lock Info: - ID: 3e2169be-df88-a073-df20-d5d9863abd5d - Path: - Operation: init - Who: john@Johns-MacBook-Pro.local - Version: 0.13.0 - Created: 2020-06-05 16:54:18.347493 +0000 UTC - Info: - TestBackendLocksSoak: backend_test.go:94: Error creating state manager: the state is already locked by another terraform client - Lock Info: - ID: 3e2169be-df88-a073-df20-d5d9863abd5d - Path: - Operation: init - Who: john@Johns-MacBook-Pro.local - Version: 0.13.0 - Created: 2020-06-05 16:54:18.347493 +0000 UTC - Info: - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", 
Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} - TestBackendLocksSoak: backend_test.go:88: TestBackendConfig on *kubernetes.Backend with configs.synthBody{Filename:"", Values:map[string]cty.Value{"secret_suffix":cty.StringVal("test-state")}} -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully 
loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -2020/06/05 12:54:18 [INFO] Successfully loaded config file (/Users/john/.kube/config; default context) -panic: interface conversion: interface is nil, not statemgr.Locker - -goroutine 203 [running]: -github.com/hashicorp/terraform/backend/remote-state/kubernetes.TestBackendLocksSoak.func1(0xc000157200, 0xc00038cc00, 0xc00038cc10, 0x85) - /Users/john/dev/hashicorp/terraform/backend/remote-state/kubernetes/backend_test.go:103 +0x290 -created by github.com/hashicorp/terraform/backend/remote-state/kubernetes.TestBackendLocksSoak - /Users/john/dev/hashicorp/terraform/backend/remote-state/kubernetes/backend_test.go:87 +0xef -FAIL github.com/hashicorp/terraform/backend/remote-state/kubernetes 3.487s -FAIL diff --git a/backend/remote-state/manta/backend.go b/backend/remote-state/manta/backend.go index 9189ad890..c7e32403b 100644 --- a/backend/remote-state/manta/backend.go +++ b/backend/remote-state/manta/backend.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/internal/legacy/helper/schema" triton "github.com/joyent/triton-go" "github.com/joyent/triton-go/authentication" "github.com/joyent/triton-go/storage" diff --git a/backend/remote-state/oss/backend.go b/backend/remote-state/oss/backend.go index 9b1227634..f87c5f467 100644 --- a/backend/remote-state/oss/backend.go +++ b/backend/remote-state/oss/backend.go @@ -25,7 +25,7 @@ import ( "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/internal/legacy/helper/schema" "github.com/hashicorp/terraform/version" 
"github.com/jmespath/go-jmespath" "github.com/mitchellh/go-homedir" diff --git a/backend/remote-state/pg/backend.go b/backend/remote-state/pg/backend.go index 0191176a9..db1b01461 100644 --- a/backend/remote-state/pg/backend.go +++ b/backend/remote-state/pg/backend.go @@ -6,7 +6,7 @@ import ( "fmt" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/internal/legacy/helper/schema" "github.com/lib/pq" ) diff --git a/backend/remote-state/s3/backend.go b/backend/remote-state/s3/backend.go index b656e3953..90570d8e6 100644 --- a/backend/remote-state/s3/backend.go +++ b/backend/remote-state/s3/backend.go @@ -12,7 +12,7 @@ import ( "github.com/aws/aws-sdk-go/service/s3" awsbase "github.com/hashicorp/aws-sdk-go-base" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/internal/legacy/helper/schema" "github.com/hashicorp/terraform/internal/logging" "github.com/hashicorp/terraform/version" ) @@ -327,7 +327,7 @@ func (b *Backend) configure(ctx context.Context) error { AssumeRoleExternalID: data.Get("external_id").(string), AssumeRolePolicy: data.Get("assume_role_policy").(string), AssumeRoleSessionName: data.Get("session_name").(string), - CallerDocumentationURL: "https://www.terraform.io/docs/backends/types/s3.html", + CallerDocumentationURL: "https://www.terraform.io/docs/language/settings/backends/s3.html", CallerName: "S3 Backend", CredsFilename: data.Get("shared_credentials_file").(string), DebugLogging: logging.IsDebugOrHigher(), diff --git a/backend/remote-state/swift/backend.go b/backend/remote-state/swift/backend.go index 3a22438f5..ca7571c71 100644 --- a/backend/remote-state/swift/backend.go +++ b/backend/remote-state/swift/backend.go @@ -12,7 +12,7 @@ import ( "github.com/gophercloud/utils/terraform/auth" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/schema" + 
"github.com/hashicorp/terraform/internal/legacy/helper/schema" "github.com/hashicorp/terraform/version" ) diff --git a/backend/remote/backend.go b/backend/remote/backend.go index 77ad26225..c79ff32e8 100644 --- a/backend/remote/backend.go +++ b/backend/remote/backend.go @@ -85,6 +85,12 @@ type Remote struct { // opLock locks operations opLock sync.Mutex + + // ignoreVersionConflict, if true, will disable the requirement that the + // local Terraform version matches the remote workspace's configured + // version. This will also cause VerifyWorkspaceTerraformVersion to return + // a warning diagnostic instead of an error. + ignoreVersionConflict bool } var _ backend.Backend = (*Remote)(nil) @@ -629,6 +635,20 @@ func (b *Remote) StateMgr(name string) (statemgr.Full, error) { } } + // This is a fallback error check. Most code paths should use other + // mechanisms to check the version, then set the ignoreVersionConflict + // field to true. This check is only in place to ensure that we don't + // accidentally upgrade state with a new code path, and the version check + // logic is coarser and simpler. + if !b.ignoreVersionConflict { + wsv := workspace.TerraformVersion + // Explicitly ignore the pseudo-version "latest" here, as it will cause + // plan and apply to always fail. + if wsv != tfversion.String() && wsv != "latest" { + return nil, fmt.Errorf("Remote workspace Terraform version %q does not match local Terraform version %q", workspace.TerraformVersion, tfversion.String()) + } + } + client := &remoteClient{ client: b.client, organization: b.organization, @@ -674,6 +694,17 @@ func (b *Remote) Operation(ctx context.Context, op *backend.Operation) (*backend } } + // Terraform remote version conflicts are not a concern for operations. 
We + // are in one of three states: + // + // - Running remotely, in which case the local version is irrelevant; + // - Workspace configured for local operations, in which case the remote + // version is meaningless; + // - Forcing local operations with a remote backend, which should only + // happen in the Terraform Cloud worker, in which case the Terraform + // versions by definition match. + b.IgnoreVersionConflict() + // Check if we need to use the local backend to run the operation. if b.forceLocal || !w.Operations { return b.local.Operation(ctx, op) @@ -837,6 +868,114 @@ func (b *Remote) ReportResult(op *backend.RunningOperation, err error) { } } +// IgnoreVersionConflict allows commands to disable the fall-back check that +// the local Terraform version matches the remote workspace's configured +// Terraform version. This should be called by commands where this check is +// unnecessary, such as those performing remote operations, or read-only +// operations. It will also be called if the user uses a command-line flag to +// override this check. +func (b *Remote) IgnoreVersionConflict() { + b.ignoreVersionConflict = true +} + +// VerifyWorkspaceTerraformVersion compares the local Terraform version against +// the workspace's configured Terraform version. If they are equal, this means +// that there are no compatibility concerns, so it returns no diagnostics. +// +// If the versions differ, +func (b *Remote) VerifyWorkspaceTerraformVersion(workspaceName string) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + workspace, err := b.getRemoteWorkspace(context.Background(), workspaceName) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error looking up workspace", + fmt.Sprintf("Workspace read failed: %s", err), + )) + return diags + } + + // If the workspace has the pseudo-version "latest", all bets are off. We + // cannot reasonably determine what the intended Terraform version is, so + // we'll skip version verification. 
+ if workspace.TerraformVersion == "latest" { + return nil + } + + // If the workspace has remote operations disabled, the remote Terraform + // version is effectively meaningless, so we'll skip version verification. + if workspace.Operations == false { + return nil + } + + remoteVersion, err := version.NewSemver(workspace.TerraformVersion) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error looking up workspace", + fmt.Sprintf("Invalid Terraform version: %s", err), + )) + return diags + } + + v014 := version.Must(version.NewSemver("0.14.0")) + if tfversion.SemVer.LessThan(v014) || remoteVersion.LessThan(v014) { + // Versions of Terraform prior to 0.14.0 will refuse to load state files + // written by a newer version of Terraform, even if it is only a patch + // level difference. As a result we require an exact match. + if tfversion.SemVer.Equal(remoteVersion) { + return diags + } + } + if tfversion.SemVer.GreaterThanOrEqual(v014) && remoteVersion.GreaterThanOrEqual(v014) { + // Versions of Terraform after 0.14.0 should be compatible with each + // other. At the time this code was written, the only constraints we + // are aware of are: + // + // - 0.14.0 is guaranteed to be compatible with versions up to but not + // including 1.1.0 + v110 := version.Must(version.NewSemver("1.1.0")) + if tfversion.SemVer.LessThan(v110) && remoteVersion.LessThan(v110) { + return diags + } + // - Any new Terraform state version will require at least minor patch + // increment, so x.y.* will always be compatible with each other + tfvs := tfversion.SemVer.Segments64() + rwvs := remoteVersion.Segments64() + if len(tfvs) == 3 && len(rwvs) == 3 && tfvs[0] == rwvs[0] && tfvs[1] == rwvs[1] { + return diags + } + } + + // Even if ignoring version conflicts, it may still be useful to call this + // method and warn the user about a mismatch between the local and remote + // Terraform versions. 
+ severity := tfdiags.Error + if b.ignoreVersionConflict { + severity = tfdiags.Warning + } + + suggestion := " If you're sure you want to upgrade the state, you can force Terraform to continue using the -ignore-remote-version flag. This may result in an unusable workspace." + if b.ignoreVersionConflict { + suggestion = "" + } + diags = diags.Append(tfdiags.Sourceless( + severity, + "Terraform version mismatch", + fmt.Sprintf( + "The local Terraform version (%s) does not match the configured version for remote workspace %s/%s (%s).%s", + tfversion.String(), + b.organization, + workspace.Name, + workspace.TerraformVersion, + suggestion, + ), + )) + + return diags +} + // Colorize returns the Colorize structure that can be used for colorizing // output. This is guaranteed to always return a non-nil value and so useful // as a helper to wrap any potentially colored strings. diff --git a/backend/remote/backend_apply.go b/backend/remote/backend_apply.go index 5db412baa..f03323d24 100644 --- a/backend/remote/backend_apply.go +++ b/backend/remote/backend_apply.go @@ -170,8 +170,7 @@ func (b *Remote) opApply(stopCtx, cancelCtx context.Context, op *backend.Operati return r, diags.Err() } - mustConfirm := (op.UIIn != nil && op.UIOut != nil) && - ((op.Destroy && (!op.DestroyForce && !op.AutoApprove)) || (!op.Destroy && !op.AutoApprove)) + mustConfirm := (op.UIIn != nil && op.UIOut != nil) && !op.AutoApprove if !w.AutoApply { if mustConfirm { diff --git a/backend/remote/backend_apply_test.go b/backend/remote/backend_apply_test.go index 7fe5c1c0c..c39587867 100644 --- a/backend/remote/backend_apply_test.go +++ b/backend/remote/backend_apply_test.go @@ -11,12 +11,14 @@ import ( "github.com/google/go-cmp/cmp" tfe "github.com/hashicorp/go-tfe" + version "github.com/hashicorp/go-version" "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/internal/initwd" "github.com/hashicorp/terraform/plans/planfile" 
"github.com/hashicorp/terraform/states/statemgr" "github.com/hashicorp/terraform/terraform" + tfversion "github.com/hashicorp/terraform/version" "github.com/mitchellh/cli" ) @@ -542,8 +544,8 @@ func TestRemote_applyApprovedExternally(t *testing.T) { t.Fatalf("error starting operation: %v", err) } - // Wait 2 seconds to make sure the run started. - time.Sleep(2 * time.Second) + // Wait 50 milliseconds to make sure the run started. + time.Sleep(50 * time.Millisecond) wl, err := b.client.Workspaces.List( ctx, @@ -617,8 +619,8 @@ func TestRemote_applyDiscardedExternally(t *testing.T) { t.Fatalf("error starting operation: %v", err) } - // Wait 2 seconds to make sure the run started. - time.Sleep(2 * time.Second) + // Wait 50 milliseconds to make sure the run started. + time.Sleep(50 * time.Millisecond) wl, err := b.client.Workspaces.List( ctx, @@ -773,8 +775,8 @@ func TestRemote_applyForceLocal(t *testing.T) { if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { t.Fatalf("expected plan summery in output: %s", output) } - if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summery in output: %s", output) + if !run.State.HasResources() { + t.Fatalf("expected resources in state") } } @@ -831,8 +833,8 @@ func TestRemote_applyWorkspaceWithoutOperations(t *testing.T) { if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { t.Fatalf("expected plan summery in output: %s", output) } - if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("expected apply summery in output: %s", output) + if !run.State.HasResources() { + t.Fatalf("expected resources in state") } } @@ -871,7 +873,7 @@ func TestRemote_applyLockTimeout(t *testing.T) { "approve": "yes", }) - op.StateLockTimeout = 5 * time.Second + op.StateLockTimeout = 50 * time.Millisecond op.UIIn = input op.UIOut = b.CLI op.Workspace = backend.DefaultStateName @@ -887,8 +889,8 @@ func TestRemote_applyLockTimeout(t *testing.T) { case 
<-sigint: // Stop redirecting SIGINT signals. signal.Stop(sigint) - case <-time.After(10 * time.Second): - t.Fatalf("expected lock timeout after 5 seconds, waited 10 seconds") + case <-time.After(200 * time.Millisecond): + t.Fatalf("expected lock timeout after 50 milliseconds, waited 200 milliseconds") } if len(input.answers) != 2 { @@ -1277,3 +1279,141 @@ func TestRemote_applyWithRemoteError(t *testing.T) { t.Fatalf("expected apply error in output: %s", output) } } + +func TestRemote_applyVersionCheck(t *testing.T) { + testCases := map[string]struct { + localVersion string + remoteVersion string + forceLocal bool + hasOperations bool + wantErr string + }{ + "versions can be different for remote apply": { + localVersion: "0.14.0", + remoteVersion: "0.13.5", + hasOperations: true, + }, + "versions can be different for local apply": { + localVersion: "0.14.0", + remoteVersion: "0.13.5", + hasOperations: false, + }, + "force local with remote operations and different versions is acceptable": { + localVersion: "0.14.0", + remoteVersion: "0.14.0-acme-provider-bundle", + forceLocal: true, + hasOperations: true, + }, + "no error if versions are identical": { + localVersion: "0.14.0", + remoteVersion: "0.14.0", + forceLocal: true, + hasOperations: true, + }, + "no error if force local but workspace has remote operations disabled": { + localVersion: "0.14.0", + remoteVersion: "0.13.5", + forceLocal: true, + hasOperations: false, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + // SETUP: Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // SETUP: Set local version for the test case + tfversion.Prerelease = "" + tfversion.Version = tc.localVersion + tfversion.SemVer = 
version.Must(version.NewSemver(tc.localVersion)) + + // SETUP: Set force local for the test case + b.forceLocal = tc.forceLocal + + ctx := context.Background() + + // SETUP: set the operations and Terraform Version fields on the + // remote workspace + _, err := b.client.Workspaces.Update( + ctx, + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + Operations: tfe.Bool(tc.hasOperations), + TerraformVersion: tfe.String(tc.remoteVersion), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + // RUN: prepare the apply operation and run it + op, configCleanup := testOperationApply(t, "./testdata/apply") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + // RUN: wait for completion + <-run.Done() + + if tc.wantErr != "" { + // ASSERT: if the test case wants an error, check for failure + // and the error message + if run.Result != backend.OperationFailure { + t.Fatalf("expected run to fail, but result was %#v", run.Result) + } + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, tc.wantErr) { + t.Fatalf("missing error %q\noutput: %s", tc.wantErr, errOutput) + } + } else { + // ASSERT: otherwise, check for success and appropriate output + // based on whether the run should be local or remote + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + output := b.CLI.(*cli.MockUi).OutputWriter.String() + hasRemote := strings.Contains(output, "Running apply in the remote backend") + hasSummary := strings.Contains(output, "1 added, 0 changed, 0 destroyed") + hasResources := run.State.HasResources() + if !tc.forceLocal && tc.hasOperations { + if !hasRemote { + t.Errorf("missing remote backend 
header in output: %s", output) + } + if !hasSummary { + t.Errorf("expected apply summary in output: %s", output) + } + } else { + if hasRemote { + t.Errorf("unexpected remote backend header in output: %s", output) + } + if !hasResources { + t.Errorf("expected resources in state") + } + } + } + }) + } +} diff --git a/backend/remote/backend_common.go b/backend/remote/backend_common.go index 7fe5373db..fb390acad 100644 --- a/backend/remote/backend_common.go +++ b/backend/remote/backend_common.go @@ -24,6 +24,13 @@ var ( errRunOverridden = errors.New("overridden using the UI or API") ) +var ( + backoffMin = 1000.0 + backoffMax = 3000.0 + + runPollInterval = 3 * time.Second +) + // backoff will perform exponential backoff based on the iteration and // limited by the provided min and max (in milliseconds) durations. func backoff(min, max float64, iter int) time.Duration { @@ -43,7 +50,7 @@ func (b *Remote) waitForRun(stopCtx, cancelCtx context.Context, op *backend.Oper return r, stopCtx.Err() case <-cancelCtx.Done(): return r, cancelCtx.Err() - case <-time.After(backoff(1000, 3000, i)): + case <-time.After(backoff(backoffMin, backoffMax, i)): // Timer up, show status } @@ -243,15 +250,7 @@ func (b *Remote) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Op return nil } - if b.CLI != nil { - b.CLI.Output("\n------------------------------------------------------------------------\n") - } - msgPrefix := "Cost estimation" - if b.CLI != nil { - b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) - } - started := time.Now() updated := started for i := 0; ; i++ { @@ -260,7 +259,7 @@ func (b *Remote) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Op return stopCtx.Err() case <-cancelCtx.Done(): return cancelCtx.Err() - case <-time.After(1 * time.Second): + case <-time.After(backoff(backoffMin, backoffMax, i)): } // Retrieve the cost estimate to get its current status. 
@@ -277,6 +276,12 @@ func (b *Remote) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Op } } + // checking if i == 0 so as to avoid printing this starting horizontal-rule + // every retry, and that it only prints it on the first (i=0) attempt. + if b.CLI != nil && i == 0 { + b.CLI.Output("\n------------------------------------------------------------------------\n") + } + switch ce.Status { case tfe.CostEstimateFinished: delta, err := strconv.ParseFloat(ce.DeltaMonthlyCost, 64) @@ -292,6 +297,7 @@ func (b *Remote) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Op deltaRepr := strings.Replace(ce.DeltaMonthlyCost, "-", "", 1) if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Resources: %d of %d estimated", ce.MatchedResourcesCount, ce.ResourcesCount))) b.CLI.Output(b.Colorize().Color(fmt.Sprintf(" $%s/mo %s$%s", ce.ProposedMonthlyCost, sign, deltaRepr))) @@ -313,16 +319,17 @@ func (b *Remote) costEstimate(stopCtx, cancelCtx context.Context, op *backend.Op elapsed = fmt.Sprintf( " (%s elapsed)", current.Sub(started).Truncate(30*time.Second)) } + b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) b.CLI.Output(b.Colorize().Color("Waiting for cost estimate to complete..." 
+ elapsed + "\n")) } continue case tfe.CostEstimateSkippedDueToTargeting: + b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) b.CLI.Output("Not available for this plan, because it was created with the -target option.") b.CLI.Output("\n------------------------------------------------------------------------") return nil case tfe.CostEstimateErrored: - b.CLI.Output(msgPrefix + " errored:\n") - b.CLI.Output(ce.ErrorMessage) + b.CLI.Output(msgPrefix + " errored.\n") b.CLI.Output("\n------------------------------------------------------------------------") return nil case tfe.CostEstimateCanceled: @@ -455,7 +462,7 @@ func (b *Remote) confirm(stopCtx context.Context, op *backend.Operation, opts *t return case <-stopCtx.Done(): return - case <-time.After(3 * time.Second): + case <-time.After(runPollInterval): // Retrieve the run again to get its current status. r, err := b.client.Runs.Read(stopCtx, r.ID) if err != nil { @@ -489,10 +496,10 @@ func (b *Remote) confirm(stopCtx context.Context, op *backend.Operation, opts *t } if err == errRunDiscarded { + err = errApplyDiscarded if op.Destroy { err = errDestroyDiscarded } - err = errApplyDiscarded } result <- err diff --git a/backend/remote/backend_context.go b/backend/remote/backend_context.go index 13202f547..577c92d92 100644 --- a/backend/remote/backend_context.go +++ b/backend/remote/backend_context.go @@ -156,11 +156,20 @@ func (b *Remote) getRemoteWorkspaceName(localWorkspaceName string) string { } } -func (b *Remote) getRemoteWorkspaceID(ctx context.Context, localWorkspaceName string) (string, error) { +func (b *Remote) getRemoteWorkspace(ctx context.Context, localWorkspaceName string) (*tfe.Workspace, error) { remoteWorkspaceName := b.getRemoteWorkspaceName(localWorkspaceName) - log.Printf("[TRACE] backend/remote: looking up workspace id for %s/%s", b.organization, remoteWorkspaceName) + log.Printf("[TRACE] backend/remote: looking up workspace for %s/%s", b.organization, remoteWorkspaceName) remoteWorkspace, err := 
b.client.Workspaces.Read(ctx, b.organization, remoteWorkspaceName) + if err != nil { + return nil, err + } + + return remoteWorkspace, nil +} + +func (b *Remote) getRemoteWorkspaceID(ctx context.Context, localWorkspaceName string) (string, error) { + remoteWorkspace, err := b.getRemoteWorkspace(ctx, localWorkspaceName) if err != nil { return "", err } diff --git a/backend/remote/backend_context_test.go b/backend/remote/backend_context_test.go index 48a6a8525..1a214deb9 100644 --- a/backend/remote/backend_context_test.go +++ b/backend/remote/backend_context_test.go @@ -195,7 +195,7 @@ func TestRemoteContextWithVars(t *testing.T) { key := "key" v.Key = &key } - b.client.Variables.Create(nil, workspaceID, *v) + b.client.Variables.Create(context.TODO(), workspaceID, *v) _, _, diags := b.Context(op) diff --git a/backend/remote/backend_mock.go b/backend/remote/backend_mock.go index eaa81cbfc..2d0652921 100644 --- a/backend/remote/backend_mock.go +++ b/backend/remote/backend_mock.go @@ -17,6 +17,7 @@ import ( tfe "github.com/hashicorp/go-tfe" "github.com/hashicorp/terraform/terraform" + tfversion "github.com/hashicorp/terraform/version" "github.com/mitchellh/copystructure" ) @@ -360,7 +361,7 @@ func (m *mockLogReader) Read(l []byte) (int, error) { if written, err := m.read(l); err != io.ErrNoProgress { return written, err } - time.Sleep(500 * time.Millisecond) + time.Sleep(1 * time.Millisecond) } } @@ -1124,10 +1125,15 @@ func (m *mockWorkspaces) List(ctx context.Context, organization string, options } func (m *mockWorkspaces) Create(ctx context.Context, organization string, options tfe.WorkspaceCreateOptions) (*tfe.Workspace, error) { + if strings.HasSuffix(*options.Name, "no-operations") { + options.Operations = tfe.Bool(false) + } else if options.Operations == nil { + options.Operations = tfe.Bool(true) + } w := &tfe.Workspace{ ID: generateID("ws-"), Name: *options.Name, - Operations: !strings.HasSuffix(*options.Name, "no-operations"), + Operations: 
*options.Operations, Permissions: &tfe.WorkspacePermissions{ CanQueueApply: true, CanQueueRun: true, @@ -1139,6 +1145,11 @@ func (m *mockWorkspaces) Create(ctx context.Context, organization string, option if options.VCSRepo != nil { w.VCSRepo = &tfe.VCSRepo{} } + if options.TerraformVersion != nil { + w.TerraformVersion = *options.TerraformVersion + } else { + w.TerraformVersion = tfversion.String() + } m.workspaceIDs[w.ID] = w m.workspaceNames[w.Name] = w return w, nil @@ -1171,6 +1182,9 @@ func (m *mockWorkspaces) Update(ctx context.Context, organization, workspace str return nil, tfe.ErrResourceNotFound } + if options.Operations != nil { + w.Operations = *options.Operations + } if options.Name != nil { w.Name = *options.Name } diff --git a/backend/remote/backend_plan.go b/backend/remote/backend_plan.go index f9fcf82b6..e42ec9576 100644 --- a/backend/remote/backend_plan.go +++ b/backend/remote/backend_plan.go @@ -20,6 +20,8 @@ import ( "github.com/hashicorp/terraform/tfdiags" ) +var planConfigurationVersionsPollInterval = 500 * time.Millisecond + func (b *Remote) opPlan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { log.Printf("[INFO] backend/remote: starting Plan operation") @@ -213,7 +215,7 @@ in order to capture the filesystem context the remote workspace expects: return nil, context.Canceled case <-cancelCtx.Done(): return nil, context.Canceled - case <-time.After(500 * time.Millisecond): + case <-time.After(planConfigurationVersionsPollInterval): cv, err = b.client.ConfigurationVersions.Read(stopCtx, cv.ID) if err != nil { return nil, generalError("Failed to retrieve configuration version", err) diff --git a/backend/remote/backend_plan_test.go b/backend/remote/backend_plan_test.go index a2c6e4ad2..a2fecf2d5 100644 --- a/backend/remote/backend_plan_test.go +++ b/backend/remote/backend_plan_test.go @@ -620,7 +620,7 @@ func TestRemote_planLockTimeout(t *testing.T) { "approve": "yes", }) - op.StateLockTimeout = 5 
* time.Second + op.StateLockTimeout = 50 * time.Millisecond op.UIIn = input op.UIOut = b.CLI op.Workspace = backend.DefaultStateName @@ -636,8 +636,8 @@ func TestRemote_planLockTimeout(t *testing.T) { case <-sigint: // Stop redirecting SIGINT signals. signal.Stop(sigint) - case <-time.After(10 * time.Second): - t.Fatalf("expected lock timeout after 5 seconds, waited 10 seconds") + case <-time.After(200 * time.Millisecond): + t.Fatalf("expected lock timeout after 50 milliseconds, waited 200 milliseconds") } if len(input.answers) != 2 { diff --git a/backend/remote/backend_test.go b/backend/remote/backend_test.go index 8c1e9a80f..0155ba590 100644 --- a/backend/remote/backend_test.go +++ b/backend/remote/backend_test.go @@ -1,13 +1,18 @@ package remote import ( + "context" + "fmt" "reflect" "strings" "testing" + tfe "github.com/hashicorp/go-tfe" + version "github.com/hashicorp/go-version" "github.com/hashicorp/terraform-svchost/disco" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/version" + "github.com/hashicorp/terraform/tfdiags" + tfversion "github.com/hashicorp/terraform/version" "github.com/zclconf/go-cty/cty" backendLocal "github.com/hashicorp/terraform/backend/local" @@ -196,11 +201,11 @@ func TestRemote_versionConstraints(t *testing.T) { } // Save and restore the actual version. - p := version.Prerelease - v := version.Version + p := tfversion.Prerelease + v := tfversion.Version defer func() { - version.Prerelease = p - version.Version = v + tfversion.Prerelease = p + tfversion.Version = v }() for name, tc := range cases { @@ -208,8 +213,8 @@ func TestRemote_versionConstraints(t *testing.T) { b := New(testDisco(s)) // Set the version for this test. 
- version.Prerelease = tc.prerelease - version.Version = tc.version + tfversion.Prerelease = tc.prerelease + tfversion.Version = tc.version // Validate _, valDiags := b.PrepareConfig(tc.config) @@ -428,17 +433,17 @@ func TestRemote_checkConstraints(t *testing.T) { } // Save and restore the actual version. - p := version.Prerelease - v := version.Version + p := tfversion.Prerelease + v := tfversion.Version defer func() { - version.Prerelease = p - version.Version = v + tfversion.Prerelease = p + tfversion.Version = v }() for name, tc := range cases { // Set the version for this test. - version.Prerelease = tc.prerelease - version.Version = tc.version + tfversion.Prerelease = tc.prerelease + tfversion.Version = tc.version // Check the constraints. diags := b.checkConstraints(tc.constraints) @@ -448,3 +453,264 @@ func TestRemote_checkConstraints(t *testing.T) { } } } + +func TestRemote_StateMgr_versionCheck(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + // Some fixed versions for testing with. This logic is a simple string + // comparison, so we don't need many test cases. 
+ v0135 := version.Must(version.NewSemver("0.13.5")) + v0140 := version.Must(version.NewSemver("0.14.0")) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // For this test, the local Terraform version is set to 0.14.0 + tfversion.Prerelease = "" + tfversion.Version = v0140.String() + tfversion.SemVer = v0140 + + // Update the mock remote workspace Terraform version to match the local + // Terraform version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String(v0140.String()), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + // This should succeed + if _, err := b.StateMgr(backend.DefaultStateName); err != nil { + t.Fatalf("expected no error, got %v", err) + } + + // Now change the remote workspace to a different Terraform version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String(v0135.String()), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + // This should fail + want := `Remote workspace Terraform version "0.13.5" does not match local Terraform version "0.14.0"` + if _, err := b.StateMgr(backend.DefaultStateName); err.Error() != want { + t.Fatalf("wrong error\n got: %v\nwant: %v", err.Error(), want) + } +} + +func TestRemote_StateMgr_versionCheckLatest(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + v0140 := version.Must(version.NewSemver("0.14.0")) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // For this test, the 
local Terraform version is set to 0.14.0 + tfversion.Prerelease = "" + tfversion.Version = v0140.String() + tfversion.SemVer = v0140 + + // Update the remote workspace to the pseudo-version "latest" + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String("latest"), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + // This should succeed despite not being a string match + if _, err := b.StateMgr(backend.DefaultStateName); err != nil { + t.Fatalf("expected no error, got %v", err) + } +} + +func TestRemote_VerifyWorkspaceTerraformVersion(t *testing.T) { + testCases := []struct { + local string + remote string + operations bool + wantErr bool + }{ + {"0.13.5", "0.13.5", true, false}, + {"0.14.0", "0.13.5", true, true}, + {"0.14.0", "0.13.5", false, false}, + {"0.14.0", "0.14.1", true, false}, + {"0.14.0", "1.0.99", true, false}, + {"0.14.0", "1.1.0", true, true}, + {"1.2.0", "1.2.99", true, false}, + {"1.2.0", "1.3.0", true, true}, + {"0.15.0", "latest", true, false}, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("local %s, remote %s", tc.local, tc.remote), func(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + local := version.Must(version.NewSemver(tc.local)) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // Override local version as specified + tfversion.Prerelease = "" + tfversion.Version = local.String() + tfversion.SemVer = local + + // Update the mock remote workspace Terraform version to the + // specified remote version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + Operations: tfe.Bool(tc.operations), + TerraformVersion: 
tfe.String(tc.remote), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) + if tc.wantErr { + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + if got := diags.Err().Error(); !strings.Contains(got, "Terraform version mismatch") { + t.Fatalf("unexpected error: %s", got) + } + } else { + if len(diags) != 0 { + t.Fatalf("unexpected diags: %s", diags.Err()) + } + } + }) + } +} + +func TestRemote_VerifyWorkspaceTerraformVersion_workspaceErrors(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + // Attempting to check the version against a workspace which doesn't exist + // should fail + diags := b.VerifyWorkspaceTerraformVersion("invalid-workspace") + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + if got := diags.Err().Error(); !strings.Contains(got, "Error looking up workspace: Workspace read failed") { + t.Fatalf("unexpected error: %s", got) + } + + // Update the mock remote workspace Terraform version to an invalid version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String("1.0.cheetarah"), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + diags = b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) + + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + if got := diags.Err().Error(); !strings.Contains(got, "Error looking up workspace: Invalid Terraform version") { + t.Fatalf("unexpected error: %s", got) + } +} + +func TestRemote_VerifyWorkspaceTerraformVersion_ignoreFlagSet(t *testing.T) { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + // If the ignore flag is set, the behaviour changes + b.IgnoreVersionConflict() + + // Different local & remote versions to cause an error + local := version.Must(version.NewSemver("0.14.0")) + remote := 
version.Must(version.NewSemver("0.13.5")) + + // Save original local version state and restore afterwards + p := tfversion.Prerelease + v := tfversion.Version + s := tfversion.SemVer + defer func() { + tfversion.Prerelease = p + tfversion.Version = v + tfversion.SemVer = s + }() + + // Override local version as specified + tfversion.Prerelease = "" + tfversion.Version = local.String() + tfversion.SemVer = local + + // Update the mock remote workspace Terraform version to the + // specified remote version + if _, err := b.client.Workspaces.Update( + context.Background(), + b.organization, + b.workspace, + tfe.WorkspaceUpdateOptions{ + TerraformVersion: tfe.String(remote.String()), + }, + ); err != nil { + t.Fatalf("error: %v", err) + } + + diags := b.VerifyWorkspaceTerraformVersion(backend.DefaultStateName) + if len(diags) != 1 { + t.Fatal("expected diag, but none returned") + } + + if got, want := diags[0].Severity(), tfdiags.Warning; got != want { + t.Errorf("wrong severity: got %#v, want %#v", got, want) + } + if got, want := diags[0].Description().Summary, "Terraform version mismatch"; got != want { + t.Errorf("wrong summary: got %s, want %s", got, want) + } + wantDetail := "The local Terraform version (0.14.0) does not match the configured version for remote workspace hashicorp/prod (0.13.5)." 
+ if got := diags[0].Description().Detail; got != wantDetail { + t.Errorf("wrong summary: got %s, want %s", got, wantDetail) + } +} diff --git a/backend/remote/remote_test.go b/backend/remote/remote_test.go index dbd0a72d6..f4cc3c5c2 100644 --- a/backend/remote/remote_test.go +++ b/backend/remote/remote_test.go @@ -4,6 +4,7 @@ import ( "flag" "os" "testing" + "time" _ "github.com/hashicorp/terraform/internal/logging" ) @@ -14,5 +15,11 @@ func TestMain(m *testing.M) { // Make sure TF_FORCE_LOCAL_BACKEND is unset os.Unsetenv("TF_FORCE_LOCAL_BACKEND") + // Reduce delays to make tests run faster + backoffMin = 1.0 + backoffMax = 1.0 + planConfigurationVersionsPollInterval = 1 * time.Millisecond + runPollInterval = 1 * time.Millisecond + os.Exit(m.Run()) } diff --git a/backend/remote/testing.go b/backend/remote/testing.go index 9f152b5d1..07ea95358 100644 --- a/backend/remote/testing.go +++ b/backend/remote/testing.go @@ -175,7 +175,7 @@ func testLocalBackend(t *testing.T, remote *Remote) backend.Enhanced { }, }, }) - p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yes"), })} diff --git a/builtin/bins/provider-test/main.go b/builtin/bins/provider-test/main.go deleted file mode 100644 index 97d03f258..000000000 --- a/builtin/bins/provider-test/main.go +++ /dev/null @@ -1,15 +0,0 @@ -package main - -import ( - "github.com/hashicorp/terraform/builtin/providers/test" - "github.com/hashicorp/terraform/plugin" - "github.com/hashicorp/terraform/terraform" -) - -func main() { - plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: func() terraform.ResourceProvider { - return test.Provider() - }, - }) -} diff --git a/builtin/bins/provisioner-chef/main.go b/builtin/bins/provisioner-chef/main.go deleted file mode 100644 index 6e81fde49..000000000 --- 
a/builtin/bins/provisioner-chef/main.go +++ /dev/null @@ -1,12 +0,0 @@ -package main - -import ( - "github.com/hashicorp/terraform/builtin/provisioners/chef" - "github.com/hashicorp/terraform/plugin" -) - -func main() { - plugin.Serve(&plugin.ServeOpts{ - ProvisionerFunc: chef.Provisioner, - }) -} diff --git a/builtin/bins/provisioner-file/main.go b/builtin/bins/provisioner-file/main.go deleted file mode 100644 index c0982b0b2..000000000 --- a/builtin/bins/provisioner-file/main.go +++ /dev/null @@ -1,12 +0,0 @@ -package main - -import ( - "github.com/hashicorp/terraform/builtin/provisioners/file" - "github.com/hashicorp/terraform/plugin" -) - -func main() { - plugin.Serve(&plugin.ServeOpts{ - ProvisionerFunc: file.Provisioner, - }) -} diff --git a/builtin/bins/provisioner-habitat/main.go b/builtin/bins/provisioner-habitat/main.go deleted file mode 100644 index 0311b4f27..000000000 --- a/builtin/bins/provisioner-habitat/main.go +++ /dev/null @@ -1,12 +0,0 @@ -package main - -import ( - "github.com/hashicorp/terraform/builtin/provisioners/habitat" - "github.com/hashicorp/terraform/plugin" -) - -func main() { - plugin.Serve(&plugin.ServeOpts{ - ProvisionerFunc: habitat.Provisioner, - }) -} diff --git a/builtin/bins/provisioner-local-exec/main.go b/builtin/bins/provisioner-local-exec/main.go deleted file mode 100644 index 2e0433ff5..000000000 --- a/builtin/bins/provisioner-local-exec/main.go +++ /dev/null @@ -1,12 +0,0 @@ -package main - -import ( - "github.com/hashicorp/terraform/builtin/provisioners/local-exec" - "github.com/hashicorp/terraform/plugin" -) - -func main() { - plugin.Serve(&plugin.ServeOpts{ - ProvisionerFunc: localexec.Provisioner, - }) -} diff --git a/builtin/bins/provisioner-puppet/main.go b/builtin/bins/provisioner-puppet/main.go deleted file mode 100644 index 63797cb2e..000000000 --- a/builtin/bins/provisioner-puppet/main.go +++ /dev/null @@ -1,12 +0,0 @@ -package main - -import ( - "github.com/hashicorp/terraform/builtin/provisioners/puppet" - 
"github.com/hashicorp/terraform/plugin" -) - -func main() { - plugin.Serve(&plugin.ServeOpts{ - ProvisionerFunc: puppet.Provisioner, - }) -} diff --git a/builtin/bins/provisioner-remote-exec/main.go b/builtin/bins/provisioner-remote-exec/main.go deleted file mode 100644 index 83ba43a98..000000000 --- a/builtin/bins/provisioner-remote-exec/main.go +++ /dev/null @@ -1,12 +0,0 @@ -package main - -import ( - "github.com/hashicorp/terraform/builtin/provisioners/remote-exec" - "github.com/hashicorp/terraform/plugin" -) - -func main() { - plugin.Serve(&plugin.ServeOpts{ - ProvisionerFunc: remoteexec.Provisioner, - }) -} diff --git a/builtin/bins/provisioner-salt-masterless/main.go b/builtin/bins/provisioner-salt-masterless/main.go deleted file mode 100644 index b7d683411..000000000 --- a/builtin/bins/provisioner-salt-masterless/main.go +++ /dev/null @@ -1,12 +0,0 @@ -package main - -import ( - "github.com/hashicorp/terraform/builtin/provisioners/salt-masterless" - "github.com/hashicorp/terraform/plugin" -) - -func main() { - plugin.Serve(&plugin.ServeOpts{ - ProvisionerFunc: saltmasterless.Provisioner, - }) -} diff --git a/builtin/providers/terraform/data_source_state.go b/builtin/providers/terraform/data_source_state.go index 3cec518ad..f2fdd5122 100644 --- a/builtin/providers/terraform/data_source_state.go +++ b/builtin/providers/terraform/data_source_state.go @@ -5,6 +5,7 @@ import ( "log" "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/backend/remote" "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/providers" "github.com/hashicorp/terraform/tfdiags" @@ -18,24 +19,42 @@ func dataSourceRemoteStateGetSchema() providers.Schema { Block: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "backend": { - Type: cty.String, - Required: true, + Type: cty.String, + Description: "The remote backend to use, e.g. 
`remote` or `http`.", + DescriptionKind: configschema.StringMarkdown, + Required: true, }, "config": { - Type: cty.DynamicPseudoType, - Optional: true, + Type: cty.DynamicPseudoType, + Description: "The configuration of the remote backend. " + + "Although this is optional, most backends require " + + "some configuration.\n\n" + + "The object can use any arguments that would be valid " + + "in the equivalent `terraform { backend \"\" { ... } }` " + + "block.", + DescriptionKind: configschema.StringMarkdown, + Optional: true, }, "defaults": { - Type: cty.DynamicPseudoType, - Optional: true, + Type: cty.DynamicPseudoType, + Description: "Default values for outputs, in case " + + "the state file is empty or lacks a required output.", + DescriptionKind: configschema.StringMarkdown, + Optional: true, }, "outputs": { - Type: cty.DynamicPseudoType, - Computed: true, + Type: cty.DynamicPseudoType, + Description: "An object containing every root-level " + + "output in the remote state.", + DescriptionKind: configschema.StringMarkdown, + Computed: true, }, "workspace": { - Type: cty.String, - Optional: true, + Type: cty.String, + Description: "The Terraform workspace to use, if " + + "the backend supports workspaces.", + DescriptionKind: configschema.StringMarkdown, + Optional: true, }, }, }, @@ -215,6 +234,12 @@ func getBackend(cfg cty.Value) (backend.Backend, cty.Value, tfdiags.Diagnostics) return nil, cty.NilVal, diags } + // If this is the enhanced remote backend, we want to disable the version + // check, because this is a read-only operation + if rb, ok := b.(*remote.Remote); ok { + rb.IgnoreVersionConflict() + } + return b, newVal, diags } diff --git a/builtin/providers/terraform/flatten.go b/builtin/providers/terraform/flatten.go deleted file mode 100644 index 4766a4f5e..000000000 --- a/builtin/providers/terraform/flatten.go +++ /dev/null @@ -1,76 +0,0 @@ -package terraform - -import ( - "fmt" - "reflect" -) - -// remoteStateFlatten takes a structure and turns into a 
flat map[string]string. -// -// Within the "thing" parameter, only primitive values are allowed. Structs are -// not supported. Therefore, it can only be slices, maps, primitives, and -// any combination of those together. -// -// The difference between this version and the version in package flatmap is that -// we add the count key for maps in this version, and return a normal -// map[string]string instead of a flatmap.Map -func remoteStateFlatten(thing map[string]interface{}) map[string]string { - result := make(map[string]string) - - for k, raw := range thing { - flatten(result, k, reflect.ValueOf(raw)) - } - - return result -} - -func flatten(result map[string]string, prefix string, v reflect.Value) { - if v.Kind() == reflect.Interface { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Bool: - if v.Bool() { - result[prefix] = "true" - } else { - result[prefix] = "false" - } - case reflect.Int: - result[prefix] = fmt.Sprintf("%d", v.Int()) - case reflect.Map: - flattenMap(result, prefix, v) - case reflect.Slice: - flattenSlice(result, prefix, v) - case reflect.String: - result[prefix] = v.String() - default: - panic(fmt.Sprintf("Unknown: %s", v)) - } -} - -func flattenMap(result map[string]string, prefix string, v reflect.Value) { - mapKeys := v.MapKeys() - - result[fmt.Sprintf("%s.%%", prefix)] = fmt.Sprintf("%d", len(mapKeys)) - for _, k := range mapKeys { - if k.Kind() == reflect.Interface { - k = k.Elem() - } - - if k.Kind() != reflect.String { - panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k)) - } - - flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k)) - } -} - -func flattenSlice(result map[string]string, prefix string, v reflect.Value) { - prefix = prefix + "." 
- - result[prefix+"#"] = fmt.Sprintf("%d", v.Len()) - for i := 0; i < v.Len(); i++ { - flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i)) - } -} diff --git a/builtin/providers/terraform/provider.go b/builtin/providers/terraform/provider.go index 605362e3d..e33f14a71 100644 --- a/builtin/providers/terraform/provider.go +++ b/builtin/providers/terraform/provider.go @@ -17,7 +17,7 @@ type Provider struct { } // NewProvider returns a new terraform provider -func NewProvider() *Provider { +func NewProvider() providers.Interface { return &Provider{} } diff --git a/builtin/providers/terraform/provider_test.go b/builtin/providers/terraform/provider_test.go index 2a3a2bfe9..fecf720d2 100644 --- a/builtin/providers/terraform/provider_test.go +++ b/builtin/providers/terraform/provider_test.go @@ -1,29 +1,10 @@ package terraform import ( - "testing" - - "github.com/hashicorp/terraform/providers" - backendInit "github.com/hashicorp/terraform/backend/init" ) -var testAccProviders map[string]*Provider -var testAccProvider *Provider - func init() { // Initialize the backends backendInit.Init(nil) - - testAccProvider = NewProvider() - testAccProviders = map[string]*Provider{ - "terraform": testAccProvider, - } -} - -func TestProvider_impl(t *testing.T) { - var _ providers.Interface = NewProvider() -} - -func testAccPreCheck(t *testing.T) { } diff --git a/builtin/providers/test/data_source.go b/builtin/providers/test/data_source.go deleted file mode 100644 index 2a735703f..000000000 --- a/builtin/providers/test/data_source.go +++ /dev/null @@ -1,63 +0,0 @@ -package test - -import ( - "time" - - "github.com/hashicorp/terraform/helper/schema" -) - -func testDataSource() *schema.Resource { - return &schema.Resource{ - Read: testDataSourceRead, - - Schema: map[string]*schema.Schema{ - "list": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "input": { - Type: schema.TypeString, - Optional: true, - }, - - "output": { - Type: 
schema.TypeString, - Computed: true, - }, - // this attribute is computed, but never set by the provider - "nil": { - Type: schema.TypeString, - Computed: true, - }, - - "input_map": { - Type: schema.TypeMap, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - }, - "output_map": { - Type: schema.TypeMap, - Elem: &schema.Schema{Type: schema.TypeString}, - Computed: true, - }, - }, - } -} - -func testDataSourceRead(d *schema.ResourceData, meta interface{}) error { - d.SetId(time.Now().UTC().String()) - d.Set("list", []interface{}{"one", "two", "three"}) - - if input, hasInput := d.GetOk("input"); hasInput { - d.Set("output", input) - } else { - d.Set("output", "some output") - } - - if inputMap, hasInput := d.GetOk("input_map"); hasInput { - d.Set("output_map", inputMap) - } - return nil -} diff --git a/builtin/providers/test/data_source_label.go b/builtin/providers/test/data_source_label.go deleted file mode 100644 index 40f3bad58..000000000 --- a/builtin/providers/test/data_source_label.go +++ /dev/null @@ -1,25 +0,0 @@ -package test - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func providerLabelDataSource() *schema.Resource { - return &schema.Resource{ - Read: providerLabelDataSourceRead, - - Schema: map[string]*schema.Schema{ - "label": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func providerLabelDataSourceRead(d *schema.ResourceData, meta interface{}) error { - label := meta.(string) - d.SetId(label) - d.Set("label", label) - return nil -} diff --git a/builtin/providers/test/data_source_label_test.go b/builtin/providers/test/data_source_label_test.go deleted file mode 100644 index d98a27b06..000000000 --- a/builtin/providers/test/data_source_label_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package test - -import ( - "errors" - "fmt" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestProviderLabelDataSource(t 
*testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: func(s *terraform.State) error { - return nil - }, - Steps: []resource.TestStep{ - { - Config: strings.TrimSpace(` -provider "test" { - label = "foo" -} - -data "test_provider_label" "test" { -} - `), - Check: func(s *terraform.State) error { - res, hasRes := s.RootModule().Resources["data.test_provider_label.test"] - if !hasRes { - return errors.New("No test_provider_label in state") - } - if got, want := res.Primary.ID, "foo"; got != want { - return fmt.Errorf("wrong id %q; want %q", got, want) - } - if got, want := res.Primary.Attributes["label"], "foo"; got != want { - return fmt.Errorf("wrong id %q; want %q", got, want) - } - return nil - }, - }, - }, - }) -} diff --git a/builtin/providers/test/data_source_test.go b/builtin/providers/test/data_source_test.go deleted file mode 100644 index c0a1ae57c..000000000 --- a/builtin/providers/test/data_source_test.go +++ /dev/null @@ -1,291 +0,0 @@ -package test - -import ( - "errors" - "fmt" - "regexp" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestDataSource_dataSourceCount(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: func(s *terraform.State) error { - return nil - }, - Steps: []resource.TestStep{ - { - Config: strings.TrimSpace(` -data "test_data_source" "test" { - count = 3 - input = "count-${count.index}" -} - -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - - list = "${data.test_data_source.test.*.output}" -} - `), - Check: func(s *terraform.State) error { - res, hasRes := s.RootModule().Resources["test_resource.foo"] - if !hasRes { - return errors.New("No test_resource.foo in state") - } - if res.Primary.Attributes["list.#"] != "3" { - return errors.New("Wrong list.#, expected 3") - } - if 
res.Primary.Attributes["list.0"] != "count-0" { - return errors.New("Wrong list.0, expected count-0") - } - if res.Primary.Attributes["list.1"] != "count-1" { - return errors.New("Wrong list.0, expected count-1") - } - if res.Primary.Attributes["list.2"] != "count-2" { - return errors.New("Wrong list.0, expected count-2") - } - return nil - }, - }, - }, - }) -} - -// Test that the output of a data source can be used as the value for -// a "count" in a real resource. This would fail with "count cannot be computed" -// at some point. -func TestDataSource_valueAsResourceCount(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: func(s *terraform.State) error { - return nil - }, - Steps: []resource.TestStep{ - { - Config: strings.TrimSpace(` -data "test_data_source" "test" { - input = "4" -} - -resource "test_resource" "foo" { - count = "${data.test_data_source.test.output}" - - required = "yep" - required_map = { - key = "value" - } -} - `), - Check: func(s *terraform.State) error { - count := 0 - for k, _ := range s.RootModule().Resources { - if strings.HasPrefix(k, "test_resource.foo.") { - count++ - } - } - - if count != 4 { - return fmt.Errorf("bad count: %d", count) - } - return nil - }, - }, - }, - }) -} - -// TestDataSource_dataSourceCountGrandChild tests that a grandchild data source -// that is based off of count works, ie: dependency chain foo -> bar -> baz. -// This was failing because CountBoundaryTransformer is being run during apply -// instead of plan, which meant that it wasn't firing after data sources were -// potentially changing state and causing diff/interpolation issues. -// -// This happens after the initial apply, after state is saved. 
-func TestDataSource_dataSourceCountGrandChild(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: func(s *terraform.State) error { - return nil - }, - Steps: []resource.TestStep{ - { - Config: dataSourceCountGrandChildConfig, - }, - { - Config: dataSourceCountGrandChildConfig, - Check: func(s *terraform.State) error { - for _, v := range []string{"foo", "bar", "baz"} { - count := 0 - for k := range s.RootModule().Resources { - if strings.HasPrefix(k, fmt.Sprintf("data.test_data_source.%s.", v)) { - count++ - } - } - - if count != 2 { - return fmt.Errorf("bad count for data.test_data_source.%s: %d", v, count) - } - } - return nil - }, - }, - }, - }) -} - -const dataSourceCountGrandChildConfig = ` -data "test_data_source" "foo" { - count = 2 - input = "one" -} - -data "test_data_source" "bar" { - count = "${length(data.test_data_source.foo.*.id)}" - input = "${data.test_data_source.foo.*.output[count.index]}" -} - -data "test_data_source" "baz" { - count = "${length(data.test_data_source.bar.*.id)}" - input = "${data.test_data_source.bar.*.output[count.index]}" -} -` - -func TestDataSource_nilComputedValues(t *testing.T) { - check := func(s *terraform.State) error { - return nil - } - - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Check: check, - Config: ` -variable "index" { - default = "d" -} - -locals { - name = { - a = "something" - b = "else" - } -} - -data "test_data_source" "x" { - input = "${lookup(local.name, var.index, local.name["a"])}" -} - -data "test_data_source" "y" { - input = data.test_data_source.x.nil == "something" ? "something" : "else" -}`, - }, - }, - }) -} - -// referencing test_data_source.one.output_map["a"] should produce an error when -// there's a count. 
-func TestDataSource_indexedCountOfOne(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: strings.TrimSpace(` -data "test_data_source" "one" { - count = 1 - input_map = { - "a" = "b" - } -} - -data "test_data_source" "two" { - input_map = { - "x" = data.test_data_source.one.output_map["a"] - } -} - `), - ExpectError: regexp.MustCompile("Because data.test_data_source.one has \"count\" set, its attributes must be accessed on specific instances"), - }, - }, - }) -} - -// Verify that we can destroy when a data source references something with a -// count of 1. -func TestDataSource_countRefDestroyError(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: strings.TrimSpace(` -data "test_data_source" "one" { - count = 1 - input = "a" -} - -data "test_data_source" "two" { - input = data.test_data_source.one[0].output -} - `), - }, - }, - }) -} - -func TestDataSource_planUpdate(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: strings.TrimSpace(` -resource "test_resource" "a" { - required = "first" - required_map = { - key = "1" - } - optional_force_new = "first" -} - -data "test_data_source" "a" { - input = "${test_resource.a.computed_from_required}" -} - -output "out" { - value = "${data.test_data_source.a.output}" -} - `), - }, - { - Config: strings.TrimSpace(` -resource "test_resource" "a" { - required = "second" - required_map = { - key = "1" - } - optional_force_new = "second" -} - -data "test_data_source" "a" { - input = "${test_resource.a.computed_from_required}" -} - -output "out" { - value = "${data.test_data_source.a.output}" -} - `), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("data.test_data_source.a", "output", "second"), - resource.TestCheckOutput("out", "second"), - ), - }, - }, - 
}) -} diff --git a/builtin/providers/test/diff_apply_test.go b/builtin/providers/test/diff_apply_test.go deleted file mode 100644 index b28e110e0..000000000 --- a/builtin/providers/test/diff_apply_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package test - -import ( - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func TestDiffApply_set(t *testing.T) { - priorAttrs := map[string]string{ - "id": "testID", - "egress.#": "1", - "egress.2129912301.cidr_blocks.#": "1", - "egress.2129912301.cidr_blocks.0": "10.0.0.0/8", - "egress.2129912301.description": "Egress description", - "egress.2129912301.from_port": "80", - "egress.2129912301.ipv6_cidr_blocks.#": "0", - "egress.2129912301.prefix_list_ids.#": "0", - "egress.2129912301.protocol": "tcp", - "egress.2129912301.security_groups.#": "0", - "egress.2129912301.self": "false", - "egress.2129912301.to_port": "8000", - } - - diff := &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "egress.2129912301.cidr_blocks.#": {Old: "1", New: "0", NewComputed: false, NewRemoved: false}, - "egress.2129912301.cidr_blocks.0": {Old: "10.0.0.0/8", New: "", NewComputed: false, NewRemoved: true}, - "egress.2129912301.description": {Old: "Egress description", New: "", NewComputed: false, NewRemoved: true}, - "egress.2129912301.from_port": {Old: "80", New: "0", NewComputed: false, NewRemoved: true}, - "egress.2129912301.ipv6_cidr_blocks.#": {Old: "0", New: "0", NewComputed: false, NewRemoved: false}, - "egress.2129912301.prefix_list_ids.#": {Old: "0", New: "0", NewComputed: false, NewRemoved: false}, - "egress.2129912301.protocol": {Old: "tcp", New: "", NewComputed: false, NewRemoved: true}, - "egress.2129912301.security_groups.#": {Old: "0", New: "0", NewComputed: false, NewRemoved: false}, - "egress.2129912301.self": {Old: "false", New: "false", NewComputed: false, NewRemoved: true}, - 
"egress.2129912301.to_port": {Old: "8000", New: "0", NewComputed: false, NewRemoved: true}, - "egress.746197026.cidr_blocks.#": {Old: "", New: "1", NewComputed: false, NewRemoved: false}, - "egress.746197026.cidr_blocks.0": {Old: "", New: "10.0.0.0/8", NewComputed: false, NewRemoved: false}, - "egress.746197026.description": {Old: "", New: "New egress description", NewComputed: false, NewRemoved: false}, - "egress.746197026.from_port": {Old: "", New: "80", NewComputed: false, NewRemoved: false}, - "egress.746197026.ipv6_cidr_blocks.#": {Old: "", New: "0", NewComputed: false, NewRemoved: false}, - "egress.746197026.prefix_list_ids.#": {Old: "", New: "0", NewComputed: false, NewRemoved: false}, - "egress.746197026.protocol": {Old: "", New: "tcp", NewComputed: false, NewRemoved: false, NewExtra: "tcp"}, - "egress.746197026.security_groups.#": {Old: "", New: "0", NewComputed: false, NewRemoved: false}, - "egress.746197026.self": {Old: "", New: "false", NewComputed: false, NewRemoved: false}, - "egress.746197026.to_port": {Old: "", New: "8000", NewComputed: false, NewRemoved: false}, - // an erroneous nil diff should do nothing - "egress.111111111.to_port": nil, - }, - } - - resSchema := map[string]*schema.Schema{ - "egress": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - ConfigMode: schema.SchemaConfigModeAttr, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from_port": { - Type: schema.TypeInt, - Required: true, - }, - - "to_port": { - Type: schema.TypeInt, - Required: true, - }, - - "protocol": { - Type: schema.TypeString, - Required: true, - }, - - "cidr_blocks": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "ipv6_cidr_blocks": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "prefix_list_ids": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - 
"security_groups": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "self": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - - "description": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - } - - expected := map[string]string{ - "egress.#": "1", - "egress.746197026.cidr_blocks.#": "1", - "egress.746197026.cidr_blocks.0": "10.0.0.0/8", - "egress.746197026.description": "New egress description", - "egress.746197026.from_port": "80", "egress.746197026.ipv6_cidr_blocks.#": "0", - "egress.746197026.prefix_list_ids.#": "0", - "egress.746197026.protocol": "tcp", - "egress.746197026.security_groups.#": "0", - "egress.746197026.self": "false", - "egress.746197026.to_port": "8000", - "id": "testID", - } - - attrs, err := diff.Apply(priorAttrs, (&schema.Resource{Schema: resSchema}).CoreConfigSchema()) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(attrs, expected) { - t.Fatalf("wrong result\ngot: %s\nwant: %s\n", spew.Sdump(attrs), spew.Sdump(expected)) - } -} diff --git a/builtin/providers/test/provider.go b/builtin/providers/test/provider.go deleted file mode 100644 index 1066f37c2..000000000 --- a/builtin/providers/test/provider.go +++ /dev/null @@ -1,59 +0,0 @@ -package test - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func Provider() terraform.ResourceProvider { - return &schema.Provider{ - Schema: map[string]*schema.Schema{ - // Optional attribute to label a particular instance for a test - // that has multiple instances of this provider, so that they - // can be distinguished using the test_provider_label data source. 
- "label": { - Type: schema.TypeString, - Optional: true, - }, - }, - ProviderMetaSchema: map[string]*schema.Schema{ - // Optionally allow specifying information at a module-level - "foo": { - Type: schema.TypeString, - Optional: true, - }, - }, - ResourcesMap: map[string]*schema.Resource{ - "test_resource": testResource(), - "test_resource_gh12183": testResourceGH12183(), - "test_resource_with_custom_diff": testResourceCustomDiff(), - "test_resource_timeout": testResourceTimeout(), - "test_resource_diff_suppress": testResourceDiffSuppress(), - "test_resource_force_new": testResourceForceNew(), - "test_resource_nested": testResourceNested(), - "test_resource_nested_set": testResourceNestedSet(), - "test_resource_state_func": testResourceStateFunc(), - "test_resource_deprecated": testResourceDeprecated(), - "test_resource_defaults": testResourceDefaults(), - "test_resource_list": testResourceList(), - "test_resource_list_set": testResourceListSet(), - "test_resource_map": testResourceMap(), - "test_resource_computed_set": testResourceComputedSet(), - "test_resource_config_mode": testResourceConfigMode(), - "test_resource_nested_id": testResourceNestedId(), - "test_resource_provider_meta": testResourceProviderMeta(), - "test_resource_signal": testResourceSignal(), - "test_undeleteable": testResourceUndeleteable(), - "test_resource_required_min": testResourceRequiredMin(), - }, - DataSourcesMap: map[string]*schema.Resource{ - "test_data_source": testDataSource(), - "test_provider_label": providerLabelDataSource(), - }, - ConfigureFunc: providerConfigure, - } -} - -func providerConfigure(d *schema.ResourceData) (interface{}, error) { - return d.Get("label"), nil -} diff --git a/builtin/providers/test/provider_test.go b/builtin/providers/test/provider_test.go deleted file mode 100644 index 40defefac..000000000 --- a/builtin/providers/test/provider_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package test - -import ( - "testing" - - 
"github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var testAccProviders map[string]terraform.ResourceProvider -var testAccProvider *schema.Provider - -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func init() { - testAccProvider = Provider().(*schema.Provider) - testAccProviders = map[string]terraform.ResourceProvider{ - "test": testAccProvider, - } -} diff --git a/builtin/providers/test/resource.go b/builtin/providers/test/resource.go deleted file mode 100644 index b05fcc681..000000000 --- a/builtin/providers/test/resource.go +++ /dev/null @@ -1,233 +0,0 @@ -package test - -import ( - "errors" - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -func testResource() *schema.Resource { - return &schema.Resource{ - Create: testResourceCreate, - Read: testResourceRead, - Update: testResourceUpdate, - Delete: testResourceDelete, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - CustomizeDiff: func(d *schema.ResourceDiff, _ interface{}) error { - if d.HasChange("optional") { - d.SetNewComputed("planned_computed") - } - return nil - }, - - Schema: map[string]*schema.Schema{ - "required": { - Type: schema.TypeString, - Required: true, - }, - "optional": { - Type: schema.TypeString, - Optional: true, - }, - "optional_bool": { - Type: schema.TypeBool, - Optional: true, - }, - "optional_force_new": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "optional_computed_map": { - Type: schema.TypeMap, - Optional: true, - Computed: true, - }, - "optional_computed_force_new": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "optional_computed": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "computed_read_only": { - Type: schema.TypeString, - Computed: true, - }, - "computed_from_required": { - Type: 
schema.TypeString, - Computed: true, - ForceNew: true, - }, - "computed_read_only_force_new": { - Type: schema.TypeString, - Computed: true, - ForceNew: true, - }, - "computed_list": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "set": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "computed_set": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - "map": { - Type: schema.TypeMap, - Optional: true, - }, - "optional_map": { - Type: schema.TypeMap, - Optional: true, - }, - "required_map": { - Type: schema.TypeMap, - Required: true, - }, - "map_that_look_like_set": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "computed_map": { - Type: schema.TypeMap, - Computed: true, - }, - "list": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "list_of_map": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeMap, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - "apply_error": { - Type: schema.TypeString, - Optional: true, - Description: "return and error during apply", - }, - "planned_computed": { - Type: schema.TypeString, - Computed: true, - Description: "copied the required field during apply, and plans computed when changed", - }, - // this should return unset from GetOkExists - "get_ok_exists_false": { - Type: schema.TypeBool, - Computed: true, - Optional: true, - Description: "do not set in config", - }, - "int": { - Type: schema.TypeInt, - Optional: true, - }, - }, - } -} - -func testResourceCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId("testId") - - errMsg, _ := d.Get("apply_error").(string) - if errMsg != "" { - return errors.New(errMsg) - } - - // 
Required must make it through to Create - if _, ok := d.GetOk("required"); !ok { - return fmt.Errorf("Missing attribute 'required', but it's required!") - } - if _, ok := d.GetOk("required_map"); !ok { - return fmt.Errorf("Missing attribute 'required_map', but it's required!") - } - - d.Set("computed_from_required", d.Get("required")) - - return testResourceRead(d, meta) -} - -func testResourceRead(d *schema.ResourceData, meta interface{}) error { - d.Set("computed_read_only", "value_from_api") - d.Set("computed_read_only_force_new", "value_from_api") - if _, ok := d.GetOk("optional_computed_map"); !ok { - d.Set("optional_computed_map", map[string]string{}) - } - d.Set("computed_map", map[string]string{"key1": "value1"}) - d.Set("computed_list", []string{"listval1", "listval2"}) - d.Set("computed_set", []string{"setval1", "setval2"}) - - d.Set("planned_computed", d.Get("optional")) - - // if there is no "set" value, erroneously set it to an empty set. This - // might change a null value to an empty set, but we should be able to - // ignore that. - s := d.Get("set") - if s == nil || s.(*schema.Set).Len() == 0 { - d.Set("set", []interface{}{}) - } - - // This mimics many providers always setting a *string value. - // The existing behavior is that this will appear in the state as an empty - // string, which we have to maintain. 
- o := d.Get("optional") - if o == "" { - d.Set("optional", nil) - } - - // This should not show as set unless it's set in the config - _, ok := d.GetOkExists("get_ok_exists_false") - if ok { - return errors.New("get_ok_exists_false should not be set") - } - - return nil -} - -func testResourceUpdate(d *schema.ResourceData, meta interface{}) error { - errMsg, _ := d.Get("apply_error").(string) - if errMsg != "" { - return errors.New(errMsg) - } - return testResourceRead(d, meta) -} - -func testResourceDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git a/builtin/providers/test/resource_computed_set.go b/builtin/providers/test/resource_computed_set.go deleted file mode 100644 index 092cd2276..000000000 --- a/builtin/providers/test/resource_computed_set.go +++ /dev/null @@ -1,123 +0,0 @@ -package test - -import ( - "bytes" - "fmt" - "math/rand" - "strings" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceComputedSet() *schema.Resource { - return &schema.Resource{ - Create: testResourceComputedSetCreate, - Read: testResourceComputedSetRead, - Delete: testResourceComputedSetDelete, - Update: testResourceComputedSetUpdate, - - CustomizeDiff: func(d *schema.ResourceDiff, _ interface{}) error { - o, n := d.GetChange("set_count") - if o != n { - d.SetNewComputed("string_set") - } - return nil - }, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "set_count": { - Type: schema.TypeInt, - Optional: true, - }, - "string_set": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, - - "rule": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - - "ip_protocol": { - Type: schema.TypeString, - 
Required: true, - ForceNew: false, - }, - - "cidr": { - Type: schema.TypeString, - Optional: true, - ForceNew: false, - StateFunc: func(v interface{}) string { - return strings.ToLower(v.(string)) - }, - }, - }, - }, - }, - "optional_set": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - } -} - -func computeSecGroupV2RuleHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["ip_protocol"].(string))) - buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["cidr"].(string)))) - - return hashcode.String(buf.String()) -} - -func testResourceComputedSetCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId(fmt.Sprintf("%x", rand.Int63())) - return testResourceComputedSetRead(d, meta) -} - -func testResourceComputedSetRead(d *schema.ResourceData, meta interface{}) error { - count := 3 - v, ok := d.GetOk("set_count") - if ok { - count = v.(int) - } - - var set []interface{} - for i := 0; i < count; i++ { - set = append(set, fmt.Sprintf("%d", i)) - } - - d.Set("string_set", schema.NewSet(schema.HashString, set)) - - // This isn't computed, but we should be able to ignore without issues. 
- d.Set("optional_set", []interface{}{}) - return nil -} - -func testResourceComputedSetUpdate(d *schema.ResourceData, meta interface{}) error { - return testResourceComputedSetRead(d, meta) -} - -func testResourceComputedSetDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git a/builtin/providers/test/resource_computed_set_test.go b/builtin/providers/test/resource_computed_set_test.go deleted file mode 100644 index 06e608235..000000000 --- a/builtin/providers/test/resource_computed_set_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package test - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestResourceComputedSet_update(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_computed_set" "foo" { -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_computed_set.foo", "string_set.#", "3", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_computed_set" "foo" { - set_count = 5 -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_computed_set.foo", "string_set.#", "5", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_computed_set" "foo" { - set_count = 2 -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_computed_set.foo", "string_set.#", "2", - ), - ), - }, - }, - }) -} - -func TestResourceComputedSet_ruleTest(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_computed_set" "foo" { - rule { 
- ip_protocol = "udp" - cidr = "0.0.0.0/0" - } -} - `), - }, - }, - }) -} diff --git a/builtin/providers/test/resource_config_mode.go b/builtin/providers/test/resource_config_mode.go deleted file mode 100644 index 82b476039..000000000 --- a/builtin/providers/test/resource_config_mode.go +++ /dev/null @@ -1,78 +0,0 @@ -package test - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceConfigMode() *schema.Resource { - return &schema.Resource{ - Create: testResourceConfigModeCreate, - Read: testResourceConfigModeRead, - Delete: testResourceConfigModeDelete, - Update: testResourceConfigModeUpdate, - - Schema: map[string]*schema.Schema{ - "resource_as_attr": { - Type: schema.TypeList, - ConfigMode: schema.SchemaConfigModeAttr, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "foo": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "nested_set": { - Type: schema.TypeSet, - Optional: true, - ConfigMode: schema.SchemaConfigModeAttr, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": { - Type: schema.TypeString, - Optional: true, - }, - "set": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - }, - } -} - -func testResourceConfigModeCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId("placeholder") - return testResourceConfigModeRead(d, meta) -} - -func testResourceConfigModeRead(d *schema.ResourceData, meta interface{}) error { - if l, ok := d.Get("resource_as_attr").([]interface{}); !ok { - return fmt.Errorf("resource_as_attr should appear as []interface{}, not %T", l) - } else { - for i, item := range l { - if _, ok := item.(map[string]interface{}); !ok { - return fmt.Errorf("resource_as_attr[%d] should appear as map[string]interface{}, not %T", i, item) - } - } - } - return nil -} - -func testResourceConfigModeUpdate(d *schema.ResourceData, meta interface{}) 
error { - return testResourceConfigModeRead(d, meta) -} - -func testResourceConfigModeDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git a/builtin/providers/test/resource_config_mode_test.go b/builtin/providers/test/resource_config_mode_test.go deleted file mode 100644 index f73adc8ff..000000000 --- a/builtin/providers/test/resource_config_mode_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package test - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestResourceConfigMode(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_config_mode" "foo" { - resource_as_attr = [ - { - foo = "resource_as_attr 0" - }, - { - foo = "resource_as_attr 1" - }, - ] -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.#", "2"), - resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.0.foo", "resource_as_attr 0"), - resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.1.foo", "resource_as_attr 1"), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_config_mode" "foo" { - # Due to a preprocessing fixup we do in lang.EvalBlock, it's allowed - # to specify resource_as_attr members using one or more nested blocks - # instead of attribute syntax, if desired. This should be equivalent - # to the previous config. - # - # This allowance is made for backward-compatibility with existing providers - # before Terraform v0.12 that were expecting nested block types to also - # support attribute syntax; it should not be used for any new use-cases. 
- resource_as_attr { - foo = "resource_as_attr 0" - } - resource_as_attr { - foo = "resource_as_attr 1" - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.#", "2"), - resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.0.foo", "resource_as_attr 0"), - resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.1.foo", "resource_as_attr 1"), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_config_mode" "foo" { - resource_as_attr = [ - { - foo = "resource_as_attr 0 updated" - }, - ] -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.#", "1"), - resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.0.foo", "resource_as_attr 0 updated"), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_config_mode" "foo" { - resource_as_attr = [] -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_config_mode.foo", "resource_as_attr.#", "0"), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_config_mode" "foo" { -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckNoResourceAttr("test_resource_config_mode.foo", "resource_as_attr.#"), - ), - }, - }, - }) -} - -func TestResourceConfigMode_nestedSet(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_config_mode" "foo" { - resource_as_attr = [] - - nested_set { - value = "a" - } - nested_set { - value = "b" - set = [] - } -} - `), - Check: resource.ComposeTestCheckFunc(), - }, - }, - }) -} diff --git 
a/builtin/providers/test/resource_data_dep_test.go b/builtin/providers/test/resource_data_dep_test.go deleted file mode 100644 index 0cd773d53..000000000 --- a/builtin/providers/test/resource_data_dep_test.go +++ /dev/null @@ -1,224 +0,0 @@ -package test - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -// TestResourceDataDep_alignedCountScaleOut tests to make sure interpolation -// works (namely without index errors) when a data source and a resource share -// the same count variable during scale-out with an existing state. -func TestResourceDataDep_alignedCountScaleOut(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: func(s *terraform.State) error { - return nil - }, - Steps: []resource.TestStep{ - { - Config: testResourceDataDepConfig(2), - }, - { - Config: testResourceDataDepConfig(4), - Check: resource.TestCheckOutput("out", "value_from_api,value_from_api,value_from_api,value_from_api"), - }, - }, - }) -} - -// TestResourceDataDep_alignedCountScaleIn tests to make sure interpolation -// works (namely without index errors) when a data source and a resource share -// the same count variable during scale-in with an existing state. -func TestResourceDataDep_alignedCountScaleIn(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: func(s *terraform.State) error { - return nil - }, - Steps: []resource.TestStep{ - { - Config: testResourceDataDepConfig(4), - }, - { - Config: testResourceDataDepConfig(2), - Check: resource.TestCheckOutput("out", "value_from_api,value_from_api"), - }, - }, - }) -} - -// TestDataResourceDep_alignedCountScaleOut functions like -// TestResourceDataDep_alignedCountScaleOut, but with the dependencies swapped -// (resource now depends on data source, a pretty regular use case, but -// included here to check for regressions). 
-func TestDataResourceDep_alignedCountScaleOut(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: func(s *terraform.State) error { - return nil - }, - Steps: []resource.TestStep{ - { - Config: testDataResourceDepConfig(2), - }, - { - Config: testDataResourceDepConfig(4), - Check: resource.TestCheckOutput("out", "test,test,test,test"), - }, - }, - }) -} - -// TestDataResourceDep_alignedCountScaleIn functions like -// TestResourceDataDep_alignedCountScaleIn, but with the dependencies swapped -// (resource now depends on data source, a pretty regular use case, but -// included here to check for regressions). -func TestDataResourceDep_alignedCountScaleIn(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: func(s *terraform.State) error { - return nil - }, - Steps: []resource.TestStep{ - { - Config: testDataResourceDepConfig(4), - }, - { - Config: testDataResourceDepConfig(2), - Check: resource.TestCheckOutput("out", "test,test"), - }, - }, - }) -} - -// TestResourceResourceDep_alignedCountScaleOut functions like -// TestResourceDataDep_alignedCountScaleOut, but with a resource-to-resource -// dependency instead, a pretty regular use case, but included here to check -// for regressions. -func TestResourceResourceDep_alignedCountScaleOut(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: func(s *terraform.State) error { - return nil - }, - Steps: []resource.TestStep{ - { - Config: testResourceResourceDepConfig(2), - }, - { - Config: testResourceResourceDepConfig(4), - Check: resource.TestCheckOutput("out", "test,test,test,test"), - }, - }, - }) -} - -// TestResourceResourceDep_alignedCountScaleIn functions like -// TestResourceDataDep_alignedCountScaleIn, but with a resource-to-resource -// dependency instead, a pretty regular use case, but included here to check -// for regressions. 
-func TestResourceResourceDep_alignedCountScaleIn(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: func(s *terraform.State) error { - return nil - }, - Steps: []resource.TestStep{ - { - Config: testResourceResourceDepConfig(4), - }, - { - Config: testResourceResourceDepConfig(2), - Check: resource.TestCheckOutput("out", "test,test"), - }, - }, - }) -} - -func testResourceDataDepConfig(count int) string { - return fmt.Sprintf(` -variable num { - default = "%d" -} - -resource "test_resource" "foo" { - count = "${var.num}" - required = "yes" - - required_map = { - "foo" = "bar" - } -} - -data "test_data_source" "bar" { - count = "${var.num}" - input = "${test_resource.foo.*.computed_read_only[count.index]}" -} - -output "out" { - value = "${join(",", data.test_data_source.bar.*.output)}" -} -`, count) -} - -func testDataResourceDepConfig(count int) string { - return fmt.Sprintf(` -variable num { - default = "%d" -} - -data "test_data_source" "foo" { - count = "${var.num}" - input = "test" -} - -resource "test_resource" "bar" { - count = "${var.num}" - required = "yes" - optional = "${data.test_data_source.foo.*.output[count.index]}" - - required_map = { - "foo" = "bar" - } -} - -output "out" { - value = "${join(",", test_resource.bar.*.optional)}" -} -`, count) -} - -func testResourceResourceDepConfig(count int) string { - return fmt.Sprintf(` -variable num { - default = "%d" -} - -resource "test_resource" "foo" { - count = "${var.num}" - required = "yes" - optional = "test" - - required_map = { - "foo" = "bar" - } -} - -resource "test_resource" "bar" { - count = "${var.num}" - required = "yes" - optional = "${test_resource.foo.*.optional[count.index]}" - - required_map = { - "foo" = "bar" - } -} - -output "out" { - value = "${join(",", test_resource.bar.*.optional)}" -} -`, count) -} diff --git a/builtin/providers/test/resource_dataproc_cluster_test.go b/builtin/providers/test/resource_dataproc_cluster_test.go 
deleted file mode 100644 index 3d5a2282f..000000000 --- a/builtin/providers/test/resource_dataproc_cluster_test.go +++ /dev/null @@ -1,491 +0,0 @@ -package test - -import ( - "reflect" - "testing" - - "github.com/google/go-cmp/cmp" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -var dataprocClusterSchema = map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "region": { - Type: schema.TypeString, - Optional: true, - Default: "global", - ForceNew: true, - }, - - "labels": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - // GCP automatically adds two labels - // 'goog-dataproc-cluster-uuid' - // 'goog-dataproc-cluster-name' - Computed: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if old != "" { - return true - } - return false - }, - }, - - "tag_set": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - - "cluster_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - - "delete_autogen_bucket": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Removed: "If you need a bucket that can be deleted, please create" + - "a new one and set the `staging_bucket` field", - }, - - "staging_bucket": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "bucket": { - Type: schema.TypeString, - Computed: true, - }, - - "gce_cluster_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - - "zone": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - 
"network": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"cluster_config.0.gce_cluster_config.0.subnetwork"}, - }, - - "subnetwork": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"cluster_config.0.gce_cluster_config.0.network"}, - }, - - "tags": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "service_account": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "service_account_scopes": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "internal_ip_only": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - }, - - "metadata": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ForceNew: true, - }, - }, - }, - }, - - "master_config": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "num_instances": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "image_uri": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "machine_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "disk_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "num_local_ssds": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "boot_disk_size_gb": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "boot_disk_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "pd-standard", - }, - }, - }, - }, - "accelerators": { - 
Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "accelerator_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "accelerator_count": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - }, - }, - }, - "instance_names": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - "preemptible_worker_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "num_instances": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - "disk_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "num_local_ssds": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "boot_disk_size_gb": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "boot_disk_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: "pd-standard", - }, - }, - }, - }, - - "instance_names": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - - "software_config": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "image_version": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "override_properties": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - - "properties": { - Type: schema.TypeMap, - Computed: true, - }, - }, - }, - }, - - "initialization_action": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "script": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "timeout_sec": { - Type: schema.TypeInt, - Optional: true, - Default: 300, - ForceNew: true, - }, - }, - }, - }, - "encryption_config": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "kms_key_name": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - }, - }, -} - -func TestDiffApply_dataprocCluster(t *testing.T) { - priorAttrs := map[string]string{ - "cluster_config.#": "1", - "cluster_config.0.bucket": "dataproc-1dc18cb2-116e-4e92-85ea-ff63a1bf2745-us-central1", - "cluster_config.0.delete_autogen_bucket": "false", - "cluster_config.0.encryption_config.#": "0", - "cluster_config.0.gce_cluster_config.#": "1", - "cluster_config.0.gce_cluster_config.0.internal_ip_only": "false", - "cluster_config.0.gce_cluster_config.0.metadata.%": "0", - "cluster_config.0.gce_cluster_config.0.network": "https://www.googleapis.com/compute/v1/projects/hc-terraform-testing/global/networks/default", - "cluster_config.0.gce_cluster_config.0.service_account": "", - "cluster_config.0.gce_cluster_config.0.service_account_scopes.#": "7", - "cluster_config.0.gce_cluster_config.0.service_account_scopes.1245378569": "https://www.googleapis.com/auth/bigtable.admin.table", - "cluster_config.0.gce_cluster_config.0.service_account_scopes.1328717722": "https://www.googleapis.com/auth/devstorage.read_write", - "cluster_config.0.gce_cluster_config.0.service_account_scopes.1693978638": "https://www.googleapis.com/auth/devstorage.full_control", - "cluster_config.0.gce_cluster_config.0.service_account_scopes.172152165": "https://www.googleapis.com/auth/logging.write", - "cluster_config.0.gce_cluster_config.0.service_account_scopes.2401844655": "https://www.googleapis.com/auth/bigquery", - "cluster_config.0.gce_cluster_config.0.service_account_scopes.299921284": 
"https://www.googleapis.com/auth/bigtable.data", - "cluster_config.0.gce_cluster_config.0.service_account_scopes.3804780973": "https://www.googleapis.com/auth/cloud.useraccounts.readonly", - "cluster_config.0.gce_cluster_config.0.subnetwork": "", - "cluster_config.0.gce_cluster_config.0.tags.#": "0", - "cluster_config.0.gce_cluster_config.0.zone": "us-central1-f", - "cluster_config.0.initialization_action.#": "0", - "cluster_config.0.master_config.#": "1", - "cluster_config.0.master_config.0.accelerators.#": "0", - "cluster_config.0.master_config.0.disk_config.#": "1", - "cluster_config.0.master_config.0.disk_config.0.boot_disk_size_gb": "500", - "cluster_config.0.master_config.0.disk_config.0.boot_disk_type": "pd-standard", - "cluster_config.0.master_config.0.disk_config.0.num_local_ssds": "0", - "cluster_config.0.master_config.0.image_uri": "https://www.googleapis.com/compute/v1/projects/cloud-dataproc/global/images/dataproc-1-3-deb9-20190228-000000-rc01", - "cluster_config.0.master_config.0.instance_names.#": "1", - "cluster_config.0.master_config.0.instance_names.0": "dproc-cluster-test-2ww3c60iww-m", - "cluster_config.0.master_config.0.machine_type": "n1-standard-4", - "cluster_config.0.master_config.0.num_instances": "1", - "cluster_config.0.preemptible_worker_config.#": "1", - "cluster_config.0.preemptible_worker_config.0.disk_config.#": "1", - "cluster_config.0.preemptible_worker_config.0.instance_names.#": "0", - "cluster_config.0.preemptible_worker_config.0.num_instances": "0", - "cluster_config.0.software_config.#": "1", - "cluster_config.0.software_config.0.image_version": "1.3.28-deb9", - "cluster_config.0.software_config.0.override_properties.%": "0", - "cluster_config.0.software_config.0.properties.%": "14", - "cluster_config.0.software_config.0.properties.capacity-scheduler:yarn.scheduler.capacity.root.default.ordering-policy": "fair", - "cluster_config.0.software_config.0.properties.core:fs.gs.block.size": "134217728", - 
"cluster_config.0.software_config.0.properties.core:fs.gs.metadata.cache.enable": "false", - "cluster_config.0.software_config.0.properties.core:hadoop.ssl.enabled.protocols": "TLSv1,TLSv1.1,TLSv1.2", - "cluster_config.0.software_config.0.properties.distcp:mapreduce.map.java.opts": "-Xmx768m", - "cluster_config.0.software_config.0.properties.distcp:mapreduce.map.memory.mb": "1024", - "cluster_config.0.software_config.0.properties.distcp:mapreduce.reduce.java.opts": "-Xmx768m", - "cluster_config.0.software_config.0.properties.distcp:mapreduce.reduce.memory.mb": "1024", - "cluster_config.0.software_config.0.properties.hdfs:dfs.datanode.address": "0.0.0.0:9866", - "cluster_config.0.software_config.0.properties.hdfs:dfs.datanode.http.address": "0.0.0.0:9864", - "cluster_config.0.software_config.0.properties.hdfs:dfs.datanode.https.address": "0.0.0.0:9865", - "cluster_config.0.software_config.0.properties.hdfs:dfs.datanode.ipc.address": "0.0.0.0:9867", - "cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.handler.count": "20", - "cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.http-address": "0.0.0.0:9870", - "cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.https-address": "0.0.0.0:9871", - "cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.lifeline.rpc-address": "dproc-cluster-test-2ww3c60iww-m:8050", - "cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.secondary.http-address": "0.0.0.0:9868", - "cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.secondary.https-address": "0.0.0.0:9869", - "cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.service.handler.count": "10", - "cluster_config.0.software_config.0.properties.hdfs:dfs.namenode.servicerpc-address": "dproc-cluster-test-2ww3c60iww-m:8051", - "cluster_config.0.software_config.0.properties.mapred-env:HADOOP_JOB_HISTORYSERVER_HEAPSIZE": "3840", - 
"cluster_config.0.software_config.0.properties.mapred:mapreduce.job.maps": "21", - "cluster_config.0.software_config.0.properties.mapred:mapreduce.job.reduce.slowstart.completedmaps": "0.95", - "cluster_config.0.software_config.0.properties.mapred:mapreduce.job.reduces": "7", - "cluster_config.0.software_config.0.properties.mapred:mapreduce.map.cpu.vcores": "1", - "cluster_config.0.software_config.0.properties.mapred:mapreduce.map.java.opts": "-Xmx2457m", - "cluster_config.0.software_config.0.properties.mapred:mapreduce.map.memory.mb": "3072", - "cluster_config.0.software_config.0.properties.mapred:mapreduce.reduce.cpu.vcores": "1", - "cluster_config.0.software_config.0.properties.mapred:mapreduce.reduce.java.opts": "-Xmx2457m", - "cluster_config.0.software_config.0.properties.mapred:mapreduce.reduce.memory.mb": "3072", - "cluster_config.0.software_config.0.properties.mapred:mapreduce.task.io.sort.mb": "256", - "cluster_config.0.software_config.0.properties.mapred:yarn.app.mapreduce.am.command-opts": "-Xmx2457m", - "cluster_config.0.software_config.0.properties.mapred:yarn.app.mapreduce.am.resource.cpu-vcores": "1", - "cluster_config.0.software_config.0.properties.mapred:yarn.app.mapreduce.am.resource.mb": "3072", - "cluster_config.0.software_config.0.properties.presto-jvm:MaxHeapSize": "12288m", - "cluster_config.0.software_config.0.properties.presto:query.max-memory-per-node": "7372MB", - "cluster_config.0.software_config.0.properties.presto:query.max-total-memory-per-node": "7372MB", - "cluster_config.0.software_config.0.properties.spark-env:SPARK_DAEMON_MEMORY": "3840m", - "cluster_config.0.software_config.0.properties.spark:spark.driver.maxResultSize": "1920m", - "cluster_config.0.software_config.0.properties.spark:spark.driver.memory": "3840m", - "cluster_config.0.software_config.0.properties.spark:spark.executor.cores": "2", - "cluster_config.0.software_config.0.properties.spark:spark.executor.instances": "2", - 
"cluster_config.0.software_config.0.properties.spark:spark.executor.memory": "5586m", - "cluster_config.0.software_config.0.properties.spark:spark.executorEnv.OPENBLAS_NUM_THREADS": "1", - "cluster_config.0.software_config.0.properties.spark:spark.scheduler.mode": "FAIR", - "cluster_config.0.software_config.0.properties.spark:spark.sql.cbo.enabled": "true", - "cluster_config.0.software_config.0.properties.spark:spark.yarn.am.memory": "640m", - "cluster_config.0.software_config.0.properties.yarn-env:YARN_TIMELINESERVER_HEAPSIZE": "3840", - "cluster_config.0.software_config.0.properties.yarn:yarn.nodemanager.resource.memory-mb": "12288", - "cluster_config.0.software_config.0.properties.yarn:yarn.resourcemanager.nodemanager-graceful-decommission-timeout-secs": "86400", - "cluster_config.0.software_config.0.properties.yarn:yarn.scheduler.maximum-allocation-mb": "12288", - "cluster_config.0.software_config.0.properties.yarn:yarn.scheduler.minimum-allocation-mb": "1024", - "cluster_config.0.staging_bucket": "", - "id": "dproc-cluster-test-ktbyrniu4e", - "labels.%": "4", - "labels.goog-dataproc-cluster-name": "dproc-cluster-test-ktbyrniu4e", - "labels.goog-dataproc-cluster-uuid": "d576c4e0-8fda-4ad1-abf5-ec951ab25855", - "labels.goog-dataproc-location": "us-central1", - "labels.key1": "value1", - "tag_set.#": "0", - } - - diff := &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "labels.%": &terraform.ResourceAttrDiff{Old: "4", New: "1", NewComputed: false, NewRemoved: false, NewExtra: interface{}(nil), RequiresNew: false, Sensitive: false, Type: 0x0}, - "labels.goog-dataproc-cluster-name": &terraform.ResourceAttrDiff{Old: "dproc-cluster-test-ktbyrniu4e", New: "", NewComputed: false, NewRemoved: true, NewExtra: interface{}(nil), RequiresNew: false, Sensitive: false, Type: 0x0}, - "labels.goog-dataproc-cluster-uuid": &terraform.ResourceAttrDiff{Old: "d576c4e0-8fda-4ad1-abf5-ec951ab25855", New: "", NewComputed: false, NewRemoved: true, 
NewExtra: interface{}(nil), RequiresNew: false, Sensitive: false, Type: 0x0}, - "labels.goog-dataproc-location": &terraform.ResourceAttrDiff{Old: "us-central1", New: "", NewComputed: false, NewRemoved: true, NewExtra: interface{}(nil), RequiresNew: false, Sensitive: false, Type: 0x0}, - }, - } - - newAttrs, err := diff.Apply(priorAttrs, (&schema.Resource{Schema: dataprocClusterSchema}).CoreConfigSchema()) - if err != nil { - t.Fatal(err) - } - - // the diff'ed labale elements should be removed - delete(priorAttrs, "labels.goog-dataproc-cluster-name") - delete(priorAttrs, "labels.goog-dataproc-cluster-uuid") - delete(priorAttrs, "labels.goog-dataproc-location") - priorAttrs["labels.%"] = "1" - - // the missing required "name" should be added - priorAttrs["name"] = "" - - if !reflect.DeepEqual(priorAttrs, newAttrs) { - t.Fatal(cmp.Diff(priorAttrs, newAttrs)) - } -} diff --git a/builtin/providers/test/resource_defaults.go b/builtin/providers/test/resource_defaults.go deleted file mode 100644 index 41038de68..000000000 --- a/builtin/providers/test/resource_defaults.go +++ /dev/null @@ -1,70 +0,0 @@ -package test - -import ( - "fmt" - "math/rand" - - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceDefaults() *schema.Resource { - return &schema.Resource{ - Create: testResourceDefaultsCreate, - Read: testResourceDefaultsRead, - Delete: testResourceDefaultsDelete, - Update: testResourceDefaultsUpdate, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "default_string": { - Type: schema.TypeString, - Optional: true, - Default: "default string", - }, - "default_bool": { - Type: schema.TypeString, - Optional: true, - Default: true, - }, - "nested": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "string": { - Type: schema.TypeString, - Optional: true, - Default: "default nested", - }, - "optional": { - 
Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - } -} - -func testResourceDefaultsCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId(fmt.Sprintf("%x", rand.Int63())) - return testResourceDefaultsRead(d, meta) -} - -func testResourceDefaultsUpdate(d *schema.ResourceData, meta interface{}) error { - return testResourceDefaultsRead(d, meta) -} - -func testResourceDefaultsRead(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func testResourceDefaultsDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git a/builtin/providers/test/resource_defaults_test.go b/builtin/providers/test/resource_defaults_test.go deleted file mode 100644 index 8aabd4482..000000000 --- a/builtin/providers/test/resource_defaults_test.go +++ /dev/null @@ -1,168 +0,0 @@ -package test - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestResourceDefaults_basic(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_defaults" "foo" { -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "default_string", "default string", - ), - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "default_bool", "1", - ), - resource.TestCheckNoResourceAttr( - "test_resource_defaults.foo", "nested.#", - ), - ), - }, - }, - }) -} - -func TestResourceDefaults_change(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - { - Config: strings.TrimSpace(` -resource "test_resource_defaults" "foo" { -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", 
"default_string", "default string", - ), - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "default_bool", "1", - ), - resource.TestCheckNoResourceAttr( - "test_resource_defaults.foo", "nested.#", - ), - ), - }, - { - Config: strings.TrimSpace(` -resource "test_resource_defaults" "foo" { - default_string = "new" - default_bool = false - nested { - optional = "nested" - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "default_string", "new", - ), - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "default_bool", "false", - ), - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "nested.#", "1", - ), - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "nested.2950978312.optional", "nested", - ), - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "nested.2950978312.string", "default nested", - ), - ), - }, - { - Config: strings.TrimSpace(` -resource "test_resource_defaults" "foo" { - default_string = "new" - default_bool = false - nested { - optional = "nested" - string = "new" - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "default_string", "new", - ), - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "default_bool", "false", - ), - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "nested.#", "1", - ), - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "nested.782850362.optional", "nested", - ), - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "nested.782850362.string", "new", - ), - ), - }, - }, - }) -} - -func TestResourceDefaults_inSet(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_defaults" "foo" { - 
nested { - optional = "val" - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "default_string", "default string", - ), - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "default_bool", "1", - ), - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "nested.2826070548.optional", "val", - ), - resource.TestCheckResourceAttr( - "test_resource_defaults.foo", "nested.2826070548.string", "default nested", - ), - ), - }, - }, - }) -} - -func TestDefaults_emptyString(t *testing.T) { - config := ` -resource "test_resource_defaults" "test" { - default_string = "" -} -` - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_defaults.test", "default_string", ""), - ), - }, - }, - }) -} diff --git a/builtin/providers/test/resource_deprecated.go b/builtin/providers/test/resource_deprecated.go deleted file mode 100644 index a176977b9..000000000 --- a/builtin/providers/test/resource_deprecated.go +++ /dev/null @@ -1,119 +0,0 @@ -package test - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceDeprecated() *schema.Resource { - return &schema.Resource{ - Create: testResourceDeprecatedCreate, - Read: testResourceDeprecatedRead, - Update: testResourceDeprecatedUpdate, - Delete: testResourceDeprecatedDelete, - - Schema: map[string]*schema.Schema{ - "map_deprecated": { - Type: schema.TypeMap, - Optional: true, - Deprecated: "deprecated", - }, - "map_removed": { - Type: schema.TypeMap, - Optional: true, - Removed: "removed", - }, - "set_block_deprecated": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Deprecated: "deprecated", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": { - Type: schema.TypeString, - Required: true, - Deprecated: "deprecated", - }, - "optional": 
{ - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Deprecated: "deprecated", - }, - }, - }, - }, - "set_block_removed": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Removed: "Removed", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "optional": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Computed: true, - Removed: "removed", - }, - }, - }, - }, - "list_block_deprecated": { - Type: schema.TypeList, - Optional: true, - Deprecated: "deprecated", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": { - Type: schema.TypeString, - Required: true, - Deprecated: "deprecated", - }, - "optional": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Deprecated: "deprecated", - }, - }, - }, - }, - "list_block_removed": { - Type: schema.TypeList, - Optional: true, - Removed: "removed", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "optional": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Removed: "removed", - }, - }, - }, - }, - }, - } -} - -func testResourceDeprecatedCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId("testId") - return nil -} - -func testResourceDeprecatedRead(d *schema.ResourceData, meta interface{}) error { - - return nil -} - -func testResourceDeprecatedUpdate(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func testResourceDeprecatedDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git a/builtin/providers/test/resource_deprecated_test.go b/builtin/providers/test/resource_deprecated_test.go deleted file mode 100644 index 8817567d9..000000000 --- a/builtin/providers/test/resource_deprecated_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package test - -import ( - "regexp" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -// an empty config should be ok, because no deprecated/removed fields are set. 
-func TestResourceDeprecated_empty(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_deprecated" "foo" { -} - `), - }, - }, - }) -} - -// Deprecated fields should still work -func TestResourceDeprecated_deprecatedOK(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_deprecated" "foo" { - map_deprecated = { - "a" = "b", - } - set_block_deprecated { - value = "1" - } - list_block_deprecated { - value = "2" - } -} - `), - }, - }, - }) -} - -// Declaring an empty block should trigger the error -func TestResourceDeprecated_removedBlocks(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_deprecated" "foo" { - set_block_removed { - } - list_block_removed { - } -} - `), - ExpectError: regexp.MustCompile("REMOVED"), - }, - }, - }) -} diff --git a/builtin/providers/test/resource_diff_suppress.go b/builtin/providers/test/resource_diff_suppress.go deleted file mode 100644 index f5cfc9331..000000000 --- a/builtin/providers/test/resource_diff_suppress.go +++ /dev/null @@ -1,104 +0,0 @@ -package test - -import ( - "fmt" - "math/rand" - "strings" - - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceDiffSuppress() *schema.Resource { - diffSuppress := func(k, old, new string, d *schema.ResourceData) bool { - if old == "" || strings.Contains(new, "replace") { - return false - } - return true - } - - return &schema.Resource{ - Create: testResourceDiffSuppressCreate, - Read: testResourceDiffSuppressRead, - 
Delete: testResourceDiffSuppressDelete, - Update: testResourceDiffSuppressUpdate, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "optional": { - Type: schema.TypeString, - Optional: true, - }, - "val_to_upper": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: func(val interface{}) string { - return strings.ToUpper(val.(string)) - }, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - return strings.ToUpper(old) == strings.ToUpper(new) - }, - }, - "network": { - Type: schema.TypeString, - Optional: true, - Default: "default", - ForceNew: true, - DiffSuppressFunc: diffSuppress, - }, - "subnetwork": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - DiffSuppressFunc: diffSuppress, - }, - - "node_pool": { - Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - }, - }, - }, - } -} - -func testResourceDiffSuppressCreate(d *schema.ResourceData, meta interface{}) error { - d.Set("network", "modified") - d.Set("subnetwork", "modified") - - if _, ok := d.GetOk("node_pool"); !ok { - d.Set("node_pool", []string{}) - } - - id := fmt.Sprintf("%x", rand.Int63()) - d.SetId(id) - return nil -} - -func testResourceDiffSuppressRead(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func testResourceDiffSuppressUpdate(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func testResourceDiffSuppressDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git a/builtin/providers/test/resource_diff_suppress_test.go b/builtin/providers/test/resource_diff_suppress_test.go deleted file mode 100644 index 89416f32a..000000000 --- 
a/builtin/providers/test/resource_diff_suppress_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package test - -import ( - "errors" - "strings" - "testing" - - "github.com/hashicorp/terraform/addrs" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceDiffSuppress_create(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_diff_suppress" "foo" { - val_to_upper = "foo" -} - `), - }, - }, - }) -} -func TestResourceDiffSuppress_update(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_diff_suppress" "foo" { - val_to_upper = "foo" -} - `), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_diff_suppress" "foo" { - val_to_upper = "bar" - optional = "more" -} - `), - }, - }, - }) -} - -func TestResourceDiffSuppress_updateIgnoreChanges(t *testing.T) { - // None of these steps should replace the instance - id := "" - checkFunc := func(s *terraform.State) error { - root := s.ModuleByPath(addrs.RootModuleInstance) - res := root.Resources["test_resource_diff_suppress.foo"] - if id != "" && res.Primary.ID != id { - return errors.New("expected no resource replacement") - } - id = res.Primary.ID - return nil - } - - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_diff_suppress" "foo" { - val_to_upper = "foo" - - network = "foo" - subnetwork = "foo" - - node_pool { - name = "default-pool" - } - lifecycle { - ignore_changes = ["node_pool"] - } -} - `), 
- Check: checkFunc, - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_diff_suppress" "foo" { - val_to_upper = "foo" - - network = "ignored" - subnetwork = "ignored" - - node_pool { - name = "default-pool" - } - lifecycle { - ignore_changes = ["node_pool"] - } -} - `), - Check: checkFunc, - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_diff_suppress" "foo" { - val_to_upper = "foo" - - network = "ignored" - subnetwork = "ignored" - - node_pool { - name = "ignored" - } - lifecycle { - ignore_changes = ["node_pool"] - } -} - `), - Check: checkFunc, - }, - }, - }) -} diff --git a/builtin/providers/test/resource_force_new.go b/builtin/providers/test/resource_force_new.go deleted file mode 100644 index 81a06736c..000000000 --- a/builtin/providers/test/resource_force_new.go +++ /dev/null @@ -1,39 +0,0 @@ -package test - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceForceNew() *schema.Resource { - return &schema.Resource{ - Create: testResourceForceNewCreate, - Read: testResourceForceNewRead, - Delete: testResourceForceNewDelete, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "triggers": { - Type: schema.TypeMap, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func testResourceForceNewCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId("testId") - return testResourceForceNewRead(d, meta) -} - -func testResourceForceNewRead(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func testResourceForceNewDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git a/builtin/providers/test/resource_force_new_test.go b/builtin/providers/test/resource_force_new_test.go deleted file mode 100644 index 3e0bf19c3..000000000 --- a/builtin/providers/test/resource_force_new_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package test - -import ( 
- "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestResourceForceNew_create(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_force_new" "foo" { - triggers = { - "a" = "foo" - } -}`), - }, - }, - }) -} -func TestResourceForceNew_update(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_force_new" "foo" { - triggers = { - "a" = "foo" - } -}`), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_force_new" "foo" { - triggers = { - "a" = "bar" - } -}`), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_force_new" "foo" { - triggers = { - "b" = "bar" - } -}`), - }, - }, - }) -} - -func TestResourceForceNew_remove(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_force_new" "foo" { - triggers = { - "a" = "bar" - } -}`), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_force_new" "foo" { -} `), - }, - }, - }) -} diff --git a/builtin/providers/test/resource_gh12183.go b/builtin/providers/test/resource_gh12183.go deleted file mode 100644 index d67bcf755..000000000 --- a/builtin/providers/test/resource_gh12183.go +++ /dev/null @@ -1,64 +0,0 @@ -package test - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -// This is a test resource to help reproduce GH-12183. 
This issue came up -// as a complex mixing of core + helper/schema and while we added core tests -// to cover some of the cases, this test helps top it off with an end-to-end -// test. -func testResourceGH12183() *schema.Resource { - return &schema.Resource{ - Create: testResourceCreate_gh12183, - Read: testResourceRead_gh12183, - Update: testResourceUpdate_gh12183, - Delete: testResourceDelete_gh12183, - Schema: map[string]*schema.Schema{ - "key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "config": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - - "rules": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - }, - }, - }, - } -} - -func testResourceCreate_gh12183(d *schema.ResourceData, meta interface{}) error { - d.SetId("testId") - return testResourceRead_gh12183(d, meta) -} - -func testResourceRead_gh12183(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func testResourceUpdate_gh12183(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func testResourceDelete_gh12183(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git a/builtin/providers/test/resource_gh12183_test.go b/builtin/providers/test/resource_gh12183_test.go deleted file mode 100644 index 9cf100587..000000000 --- a/builtin/providers/test/resource_gh12183_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package test - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -// Tests GH-12183. This would previously cause a crash. More granular -// unit tests are scattered through helper/schema and terraform core for -// this. 
-func TestResourceGH12183_basic(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_gh12183" "a" { - config { - name = "hello" - } -} - -resource "test_resource_gh12183" "b" { - key = "${lookup(test_resource_gh12183.a.config[0], "name")}" - config { - name = "required" - } -} - `), - Check: func(s *terraform.State) error { - return nil - }, - }, - }, - }) -} diff --git a/builtin/providers/test/resource_list.go b/builtin/providers/test/resource_list.go deleted file mode 100644 index 895298ebb..000000000 --- a/builtin/providers/test/resource_list.go +++ /dev/null @@ -1,192 +0,0 @@ -package test - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceList() *schema.Resource { - return &schema.Resource{ - Create: testResourceListCreate, - Read: testResourceListRead, - Update: testResourceListUpdate, - Delete: testResourceListDelete, - - CustomizeDiff: func(d *schema.ResourceDiff, _ interface{}) error { - if d.HasChange("dependent_list") { - d.SetNewComputed("computed_list") - } - return nil - }, - - Schema: map[string]*schema.Schema{ - "list_block": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "string": { - Type: schema.TypeString, - Optional: true, - }, - "int": { - Type: schema.TypeInt, - Optional: true, - }, - "force_new": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "sublist": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "sublist_block": { - Type: schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "string": { - Type: schema.TypeString, - Required: true, - }, - "int": { - Type: schema.TypeInt, - Required: true, - }, - 
}, - }, - }, - "sublist_block_optional": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "list": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - }, - }, - }, - "dependent_list": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "val": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "computed_list": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "min_items": { - Type: schema.TypeList, - Optional: true, - MinItems: 2, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "val": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "never_set": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sublist": { - Type: schema.TypeList, - MaxItems: 1, - ForceNew: true, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bool": { - Type: schema.TypeBool, - ForceNew: true, - Required: true, - }, - "string": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - "map_list": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeMap}, - }, - }, - } -} - -func testResourceListCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId("testId") - return testResourceListRead(d, meta) -} - -func testResourceListRead(d *schema.ResourceData, meta interface{}) error { - fixedIps := d.Get("dependent_list") - - // all_fixed_ips should be set as computed with a CustomizeDiff func, but - // we're trying to emulate legacy provider behavior, and updating a - // computed field was a common case. 
- ips := []interface{}{} - if fixedIps != nil { - for _, v := range fixedIps.([]interface{}) { - m := v.(map[string]interface{}) - ips = append(ips, m["val"]) - } - } - if err := d.Set("computed_list", ips); err != nil { - return err - } - - // "computing" these values should insert empty containers into the - // never_set block. - values := make(map[string]interface{}) - values["sublist"] = []interface{}{} - d.Set("never_set", []interface{}{values}) - - return nil -} - -func testResourceListUpdate(d *schema.ResourceData, meta interface{}) error { - block := d.Get("never_set").([]interface{}) - if len(block) > 0 { - // if profiles contains any values, they should not be nil - _ = block[0].(map[string]interface{}) - } - return testResourceListRead(d, meta) -} - -func testResourceListDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git a/builtin/providers/test/resource_list_set.go b/builtin/providers/test/resource_list_set.go deleted file mode 100644 index 0ce5abc8d..000000000 --- a/builtin/providers/test/resource_list_set.go +++ /dev/null @@ -1,192 +0,0 @@ -package test - -import ( - "fmt" - "math/rand" - - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceListSet() *schema.Resource { - return &schema.Resource{ - Create: testResourceListSetCreate, - Read: testResourceListSetRead, - Delete: testResourceListSetDelete, - Update: testResourceListSetUpdate, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "list": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "set": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elem": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: func(_, o, n string, _ *schema.ResourceData) bool { - return o == n - }, - }, - }, - }, - Set: func(v 
interface{}) int { - raw := v.(map[string]interface{}) - if el, ok := raw["elem"]; ok { - return schema.HashString(el) - } - return 42 - }, - }, - }, - }, - }, - "replication_configuration": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "role": { - Type: schema.TypeString, - Required: true, - }, - "rules": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Optional: true, - }, - "destination": { - Type: schema.TypeSet, - MaxItems: 1, - MinItems: 1, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "account_id": { - Type: schema.TypeString, - Optional: true, - }, - "bucket": { - Type: schema.TypeString, - Required: true, - }, - "storage_class": { - Type: schema.TypeString, - Optional: true, - }, - "replica_kms_key_id": { - Type: schema.TypeString, - Optional: true, - }, - "access_control_translation": { - Type: schema.TypeList, - Optional: true, - MinItems: 1, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "owner": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - }, - }, - "source_selection_criteria": { - Type: schema.TypeSet, - Optional: true, - MinItems: 1, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sse_kms_encrypted_objects": { - Type: schema.TypeSet, - Optional: true, - MinItems: 1, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Required: true, - }, - }, - }, - }, - }, - }, - }, - "prefix": { - Type: schema.TypeString, - Optional: true, - }, - "status": { - Type: schema.TypeString, - Required: true, - }, - "priority": { - Type: schema.TypeInt, - Optional: true, - }, - "filter": { - Type: schema.TypeList, - Optional: true, - MinItems: 1, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "prefix": { - Type: schema.TypeString, - Optional: true, - }, - "tags": { - Type: schema.TypeMap, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func testResourceListSetCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId(fmt.Sprintf("%x", rand.Int63())) - return testResourceListSetRead(d, meta) -} - -func testResourceListSetUpdate(d *schema.ResourceData, meta interface{}) error { - return testResourceListSetRead(d, meta) -} - -func testResourceListSetRead(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func testResourceListSetDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git a/builtin/providers/test/resource_list_set_test.go b/builtin/providers/test/resource_list_set_test.go deleted file mode 100644 index f1e8353fb..000000000 --- a/builtin/providers/test/resource_list_set_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package test - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestResourceListSet_basic(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list_set" "foo" { - list { - set { - elem = "A" - } - set { - elem = "B" - } - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_list_set.foo", "list.0.set.1255198513.elem", "B"), - resource.TestCheckResourceAttr("test_resource_list_set.foo", "list.0.set.3554254475.elem", "A"), - resource.TestCheckResourceAttr("test_resource_list_set.foo", "list.0.set.#", "2"), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list_set" "foo" { - list { - set { - elem = "B" - } - set { - elem = "C" - } - } -} - `), - Check: resource.ComposeTestCheckFunc( - 
resource.TestCheckResourceAttr("test_resource_list_set.foo", "list.0.set.1255198513.elem", "B"), - resource.TestCheckResourceAttr("test_resource_list_set.foo", "list.0.set.1037565863.elem", "C"), - resource.TestCheckResourceAttr("test_resource_list_set.foo", "list.0.set.#", "2"), - ), - }, - }, - }) -} - -func TestResourceListSet_updateNested(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list_set" "foo" { - replication_configuration { - role = "role_id" - rules { - id = "foobar" - status = "Enabled" - priority = 42 - filter { - tags = { - ReplicateMe = "Yes" - } - } - destination { - bucket = "bucket_id" - storage_class = "STANDARD" - } - } - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_list_set.foo", "replication_configuration.0.rules.#", "1"), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list_set" "foo" { - replication_configuration { - role = "role_id" - rules { - id = "foobar" - status = "Enabled" - priority = 42 - filter { - prefix = "foo" - tags = { - ReplicateMe = "Yes" - AnotherTag = "OK" - } - } - destination { - bucket = "bucket_id" - storage_class = "STANDARD" - } - } - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_list_set.foo", "replication_configuration.0.rules.#", "1"), - ), - }, - }, - }) -} diff --git a/builtin/providers/test/resource_list_test.go b/builtin/providers/test/resource_list_test.go deleted file mode 100644 index 876a81fd5..000000000 --- a/builtin/providers/test/resource_list_test.go +++ /dev/null @@ -1,566 +0,0 @@ -package test - -import ( - "regexp" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -// an empty config should be ok, because no deprecated/removed fields 
are set. -func TestResourceList_changed(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "foo" { - list_block { - string = "a" - int = 1 - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.#", "1", - ), - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.0.string", "a", - ), - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.0.int", "1", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "foo" { - list_block { - string = "a" - int = 1 - } - - list_block { - string = "b" - int = 2 - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.#", "2", - ), - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.0.string", "a", - ), - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.0.int", "1", - ), - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.1.string", "b", - ), - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.1.int", "2", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "foo" { - list_block { - string = "a" - int = 1 - } - - list_block { - string = "c" - int = 2 - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.#", "2", - ), - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.0.string", "a", - ), - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.0.int", "1", - ), - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.1.string", "c", - ), - 
resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.1.int", "2", - ), - ), - }, - }, - }) -} - -func TestResourceList_mapList(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -variable "map" { - type = map(string) - default = {} -} - -resource "test_resource_list" "foo" { - map_list = [ - { - a = "1" - }, - var.map - ] -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_list.foo", "map_list.1", "", - ), - ), - }, - }, - }) -} - -func TestResourceList_sublist(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "foo" { - list_block { - sublist_block { - string = "a" - int = 1 - } - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.0.sublist_block.#", "1", - ), - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.0.sublist_block.0.string", "a", - ), - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.0.sublist_block.0.int", "1", - ), - ), - }, - }, - }) -} - -func TestResourceList_interpolationChanges(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "foo" { - list_block { - string = "x" - } -} -resource "test_resource_list" "bar" { - list_block { - string = test_resource_list.foo.id - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.0.string", "x", - ), - 
resource.TestCheckResourceAttr( - "test_resource_list.bar", "list_block.0.string", "testId", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "baz" { - list_block { - string = "x" - int = 1 - } -} -resource "test_resource_list" "bar" { - list_block { - string = test_resource_list.baz.id - int = 3 - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_list.baz", "list_block.0.string", "x", - ), - resource.TestCheckResourceAttr( - "test_resource_list.bar", "list_block.0.string", "testId", - ), - ), - }, - }, - }) -} - -func TestResourceList_removedForcesNew(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "foo" { - list_block { - force_new = "ok" - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_list.foo", "list_block.0.force_new", "ok", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "foo" { -} - `), - Check: resource.ComposeTestCheckFunc(), - }, - }, - }) -} - -func TestResourceList_emptyStrings(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "foo" { - list_block { - sublist = ["a", ""] - } - - list_block { - sublist = [""] - } - - list_block { - sublist = ["", "c", ""] - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_list.foo", "list_block.0.sublist.0", "a"), - resource.TestCheckResourceAttr("test_resource_list.foo", "list_block.0.sublist.1", ""), - resource.TestCheckResourceAttr("test_resource_list.foo", 
"list_block.1.sublist.0", ""), - resource.TestCheckResourceAttr("test_resource_list.foo", "list_block.2.sublist.0", ""), - resource.TestCheckResourceAttr("test_resource_list.foo", "list_block.2.sublist.1", "c"), - resource.TestCheckResourceAttr("test_resource_list.foo", "list_block.2.sublist.2", ""), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "foo" { - list_block { - sublist = [""] - } - - list_block { - sublist = [] - } - - list_block { - sublist = ["", "c"] - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_list.foo", "list_block.0.sublist.#", "1"), - resource.TestCheckResourceAttr("test_resource_list.foo", "list_block.0.sublist.0", ""), - resource.TestCheckResourceAttr("test_resource_list.foo", "list_block.1.sublist.#", "0"), - resource.TestCheckResourceAttr("test_resource_list.foo", "list_block.2.sublist.1", "c"), - resource.TestCheckResourceAttr("test_resource_list.foo", "list_block.2.sublist.#", "2"), - ), - }, - }, - }) -} - -func TestResourceList_addRemove(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "foo" { -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_list.foo", "computed_list.#", "0"), - resource.TestCheckResourceAttr("test_resource_list.foo", "dependent_list.#", "0"), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "foo" { - dependent_list { - val = "a" - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_list.foo", "computed_list.#", "1"), - resource.TestCheckResourceAttr("test_resource_list.foo", "dependent_list.#", "1"), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" 
"foo" { -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_list.foo", "computed_list.#", "0"), - resource.TestCheckResourceAttr("test_resource_list.foo", "dependent_list.#", "0"), - ), - }, - }, - }) -} - -func TestResourceList_planUnknownInterpolation(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "foo" { - list_block { - string = "x" - } -} -resource "test_resource_list" "bar" { - list_block { - sublist = [ - test_resource_list.foo.list_block[0].string, - ] - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_list.bar", "list_block.0.sublist.0", "x", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "foo" { - list_block { - string = "x" - } - dependent_list { - val = "y" - } -} -resource "test_resource_list" "bar" { - list_block { - sublist = [ - test_resource_list.foo.computed_list[0], - ] - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_list.bar", "list_block.0.sublist.0", "y", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "foo" { - list_block { - string = "x" - } - dependent_list { - val = "z" - } -} -resource "test_resource_list" "bar" { - list_block { - sublist = [ - test_resource_list.foo.computed_list[0], - ] - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_list.bar", "list_block.0.sublist.0", "z", - ), - ), - }, - }, - }) -} - -func TestResourceList_planUnknownInterpolationList(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - 
Config: strings.TrimSpace(` -resource "test_resource_list" "foo" { - dependent_list { - val = "y" - } -} -resource "test_resource_list" "bar" { - list_block { - sublist_block_optional { - list = test_resource_list.foo.computed_list - } - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_list.bar", "list_block.0.sublist_block_optional.0.list.0", "y", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "foo" { - dependent_list { - val = "z" - } -} -resource "test_resource_list" "bar" { - list_block { - sublist_block_optional { - list = test_resource_list.foo.computed_list - } - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_list.bar", "list_block.0.sublist_block_optional.0.list.0", "z", - ), - ), - }, - }, - }) -} - -func TestResourceList_dynamicList(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "a" { - dependent_list { - val = "a" - } - - dependent_list { - val = "b" - } -} -resource "test_resource_list" "b" { - list_block { - string = "constant" - } - dynamic "list_block" { - for_each = test_resource_list.a.computed_list - content { - string = list_block.value - } - } -} - `), - Check: resource.ComposeTestCheckFunc(), - }, - }, - }) -} - -func TestResourceList_dynamicMinItems(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -variable "a" { - type = list(number) - default = [1] -} - -resource "test_resource_list" "b" { - dynamic "min_items" { - for_each = var.a - content { - val = "foo" - } - } -} - `), - ExpectError: regexp.MustCompile(`attribute supports 
2`), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "a" { - dependent_list { - val = "a" - } - - dependent_list { - val = "b" - } -} -resource "test_resource_list" "b" { - list_block { - string = "constant" - } - dynamic "min_items" { - for_each = test_resource_list.a.computed_list - content { - val = min_items.value - } - } -} - `), - }, - }, - }) -} diff --git a/builtin/providers/test/resource_map.go b/builtin/providers/test/resource_map.go deleted file mode 100644 index c6bf62bd6..000000000 --- a/builtin/providers/test/resource_map.go +++ /dev/null @@ -1,77 +0,0 @@ -package test - -import ( - "fmt" - - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceMap() *schema.Resource { - return &schema.Resource{ - Create: testResourceMapCreate, - Read: testResourceMapRead, - Update: testResourceMapUpdate, - Delete: testResourceMapDelete, - - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "map_of_three": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - ValidateFunc: func(v interface{}, _ string) ([]string, []error) { - errs := []error{} - for k, v := range v.(map[string]interface{}) { - if v == hcl2shim.UnknownVariableValue { - errs = append(errs, fmt.Errorf("unknown value in ValidateFunc: %q=%q", k, v)) - } - } - return nil, errs - }, - }, - "map_values": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "computed_map": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -func testResourceMapCreate(d *schema.ResourceData, meta interface{}) error { - // make sure all elements are passed to the map - m := d.Get("map_of_three").(map[string]interface{}) - if len(m) != 3 { - return fmt.Errorf("expected 3 map values, got %#v\n", m) - } - - d.SetId("testId") - 
return testResourceMapRead(d, meta) -} - -func testResourceMapRead(d *schema.ResourceData, meta interface{}) error { - var computedMap map[string]interface{} - if v, ok := d.GetOk("map_values"); ok { - computedMap = v.(map[string]interface{}) - } - d.Set("computed_map", computedMap) - return nil -} - -func testResourceMapUpdate(d *schema.ResourceData, meta interface{}) error { - return testResourceMapRead(d, meta) -} - -func testResourceMapDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git a/builtin/providers/test/resource_map_test.go b/builtin/providers/test/resource_map_test.go deleted file mode 100644 index 0d82d5f4f..000000000 --- a/builtin/providers/test/resource_map_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package test - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestResourceMap_basic(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - { - Config: ` -resource "test_resource_map" "foobar" { - name = "test" - map_of_three = { - one = "one" - two = "two" - empty = "" - } -}`, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_map.foobar", "map_of_three.empty", "", - ), - ), - }, - }, - }) -} - -func TestResourceMap_basicWithVars(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - { - Config: ` -variable "a" { - default = "a" -} - -variable "b" { - default = "b" -} - -resource "test_resource_map" "foobar" { - name = "test" - map_of_three = { - one = var.a - two = var.b - empty = "" - } -}`, - Check: resource.ComposeTestCheckFunc(), - }, - }, - }) -} - -func TestResourceMap_computedMap(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: 
testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - { - Config: ` -resource "test_resource_map" "foobar" { - name = "test" - map_of_three = { - one = "one" - two = "two" - empty = "" - } - map_values = { - a = "1" - b = "2" - } -}`, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_map.foobar", "computed_map.a", "1", - ), - resource.TestCheckResourceAttr( - "test_resource_map.foobar", "computed_map.b", "2", - ), - ), - }, - { - Config: ` -resource "test_resource_map" "foobar" { - name = "test" - map_of_three = { - one = "one" - two = "two" - empty = "" - } - map_values = { - a = "3" - b = "4" - } -}`, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_map.foobar", "computed_map.a", "3", - ), - resource.TestCheckResourceAttr( - "test_resource_map.foobar", "computed_map.b", "4", - ), - ), - }, - { - Config: ` -resource "test_resource_map" "foobar" { - name = "test" - map_of_three = { - one = "one" - two = "two" - empty = "" - } - map_values = { - a = "3" - } -}`, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_map.foobar", "computed_map.a", "3", - ), - resource.TestCheckNoResourceAttr( - "test_resource_map.foobar", "computed_map.b", - ), - ), - }, - }, - }) -} diff --git a/builtin/providers/test/resource_nested.go b/builtin/providers/test/resource_nested.go deleted file mode 100644 index bff743259..000000000 --- a/builtin/providers/test/resource_nested.go +++ /dev/null @@ -1,114 +0,0 @@ -package test - -import ( - "fmt" - "math/rand" - - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceNested() *schema.Resource { - return &schema.Resource{ - Create: testResourceNestedCreate, - Read: testResourceNestedRead, - Delete: testResourceNestedDelete, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "optional": { - Type: schema.TypeBool, - Optional: 
true, - ForceNew: true, - }, - "nested": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "string": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "optional": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - "nested_again": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "string": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - }, - }, - }, - "list_block": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sub_list_block": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bool": { - Type: schema.TypeBool, - Optional: true, - }, - "set": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func testResourceNestedCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId(fmt.Sprintf("%x", rand.Int63())) - return testResourceNestedRead(d, meta) -} - -func testResourceNestedUpdate(d *schema.ResourceData, meta interface{}) error { - return testResourceNestedRead(d, meta) -} - -func testResourceNestedRead(d *schema.ResourceData, meta interface{}) error { - set := []map[string]interface{}{map[string]interface{}{ - "sub_list_block": []map[string]interface{}{map[string]interface{}{ - "bool": false, - "set": schema.NewSet(schema.HashString, nil), - }}, - }} - d.Set("list_block", set) - return nil -} - -func testResourceNestedDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git a/builtin/providers/test/resource_nested_id.go b/builtin/providers/test/resource_nested_id.go deleted file mode 100644 index c3bd41974..000000000 --- 
a/builtin/providers/test/resource_nested_id.go +++ /dev/null @@ -1,48 +0,0 @@ -package test - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceNestedId() *schema.Resource { - return &schema.Resource{ - Create: testResourceNestedIdCreate, - Read: testResourceNestedIdRead, - Update: testResourceNestedIdUpdate, - Delete: testResourceNestedIdDelete, - - Schema: map[string]*schema.Schema{ - "list_block": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - }, - }, - }, - } -} - -func testResourceNestedIdCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId("testId") - return nil -} - -func testResourceNestedIdRead(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func testResourceNestedIdUpdate(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func testResourceNestedIdDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git a/builtin/providers/test/resource_nested_id_test.go b/builtin/providers/test/resource_nested_id_test.go deleted file mode 100644 index 9ca7a2468..000000000 --- a/builtin/providers/test/resource_nested_id_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package test - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestResourceNestedId_unknownId(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_id" "foo" { -} -resource "test_resource_nested_id" "bar" { - list_block { - id = test_resource_nested_id.foo.id - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_nested_id.bar", "list_block.0.id", "testId"), - ), - }, 
- }, - }) -} diff --git a/builtin/providers/test/resource_nested_set.go b/builtin/providers/test/resource_nested_set.go deleted file mode 100644 index 81d7ab0f5..000000000 --- a/builtin/providers/test/resource_nested_set.go +++ /dev/null @@ -1,171 +0,0 @@ -package test - -import ( - "fmt" - "math/rand" - - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceNestedSet() *schema.Resource { - return &schema.Resource{ - Create: testResourceNestedSetCreate, - Read: testResourceNestedSetRead, - Delete: testResourceNestedSetDelete, - Update: testResourceNestedSetUpdate, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "optional": { - Type: schema.TypeBool, - Optional: true, - }, - "force_new": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "type_list": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, - }, - }, - }, - "single": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "value": { - Type: schema.TypeString, - ForceNew: true, - Required: true, - }, - - "optional": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Computed: true, - }, - }, - }, - }, - "multi": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "set": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "required": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "optional_int": { - Type: schema.TypeInt, - Optional: true, - }, - "bool": { - Type: schema.TypeBool, - Optional: true, - }, - }, - }, - }, - - "optional": { - Type: schema.TypeString, - // commenting this causes it to get missed during apply - //ForceNew: true, - 
Optional: true, - }, - "bool": { - Type: schema.TypeBool, - Optional: true, - }, - }, - }, - }, - "with_list": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "required": { - Type: schema.TypeString, - Required: true, - }, - "list": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "list_block": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "unused": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func testResourceNestedSetCreate(d *schema.ResourceData, meta interface{}) error { - id := fmt.Sprintf("%x", rand.Int63()) - d.SetId(id) - - // replicate some awkward handling of a computed value in a set - set := d.Get("single").(*schema.Set) - l := set.List() - if len(l) == 1 { - if s, ok := l[0].(map[string]interface{}); ok { - if v, _ := s["optional"].(string); v == "" { - s["optional"] = id - } - } - } - - d.Set("single", set) - - return testResourceNestedSetRead(d, meta) -} - -func testResourceNestedSetRead(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func testResourceNestedSetDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} - -func testResourceNestedSetUpdate(d *schema.ResourceData, meta interface{}) error { - return nil -} diff --git a/builtin/providers/test/resource_nested_set_test.go b/builtin/providers/test/resource_nested_set_test.go deleted file mode 100644 index dddce0e81..000000000 --- a/builtin/providers/test/resource_nested_set_test.go +++ /dev/null @@ -1,653 +0,0 @@ -package test - -import ( - "errors" - "fmt" - "regexp" - "strings" - "testing" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceNestedSet_basic(t *testing.T) { - resource.UnitTest(t, 
resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - single { - value = "bar" - } -} - `), - }, - }, - }) -} - -func TestResourceNestedSet_basicImport(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - single { - value = "bar" - } -} - `), - }, - resource.TestStep{ - ImportState: true, - ResourceName: "test_resource_nested_set.foo", - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - single { - value = "bar" - } -} - `), - ImportStateCheck: func(ss []*terraform.InstanceState) error { - for _, s := range ss { - if s.Attributes["multi.#"] != "0" || - s.Attributes["single.#"] != "0" || - s.Attributes["type_list.#"] != "0" || - s.Attributes["with_list.#"] != "0" { - return fmt.Errorf("missing blocks in imported state:\n%s", s) - } - } - return nil - }, - }, - }, - }) -} - -// The set should not be generated because of it's computed value -func TestResourceNestedSet_noSet(t *testing.T) { - checkFunc := func(s *terraform.State) error { - root := s.ModuleByPath(addrs.RootModuleInstance) - res := root.Resources["test_resource_nested_set.foo"] - for k, v := range res.Primary.Attributes { - if strings.HasPrefix(k, "single") && k != "single.#" { - return fmt.Errorf("unexpected set value: %s:%s", k, v) - } - } - return nil - } - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { -} - `), - Check: checkFunc, - }, - }, - }) -} - -// the empty type_list must be passed to the provider with 1 nil 
element -func TestResourceNestedSet_emptyBlock(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - type_list { - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_nested_set.foo", "type_list.#", "1"), - ), - }, - }, - }) -} - -func TestResourceNestedSet_emptyNestedListBlock(t *testing.T) { - checkFunc := func(s *terraform.State) error { - root := s.ModuleByPath(addrs.RootModuleInstance) - res := root.Resources["test_resource_nested_set.foo"] - found := false - for k := range res.Primary.Attributes { - if !regexp.MustCompile(`^with_list\.\d+\.list_block\.`).MatchString(k) { - continue - } - found = true - } - if !found { - return fmt.Errorf("with_list.X.list_block not found") - } - return nil - } - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - with_list { - required = "ok" - list_block { - } - } -} - `), - Check: checkFunc, - }, - }, - }) -} -func TestResourceNestedSet_emptyNestedList(t *testing.T) { - checkFunc := func(s *terraform.State) error { - root := s.ModuleByPath(addrs.RootModuleInstance) - res := root.Resources["test_resource_nested_set.foo"] - found := false - for k, v := range res.Primary.Attributes { - if regexp.MustCompile(`^with_list\.\d+\.list\.#$`).MatchString(k) { - found = true - if v != "0" { - return fmt.Errorf("expected empty list: %s, got %s", k, v) - } - break - } - } - if !found { - return fmt.Errorf("with_list.X.nested_list not found") - } - return nil - } - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: 
[]resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - with_list { - required = "ok" - list = [] - } -} - `), - Check: checkFunc, - }, - }, - }) -} - -func TestResourceNestedSet_addRemove(t *testing.T) { - var id string - checkFunc := func(s *terraform.State) error { - root := s.ModuleByPath(addrs.RootModuleInstance) - res := root.Resources["test_resource_nested_set.foo"] - if res.Primary.ID == id { - return errors.New("expected new resource") - } - id = res.Primary.ID - return nil - } - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { -} - `), - Check: checkFunc, - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - single { - value = "bar" - } -} - `), - Check: resource.ComposeTestCheckFunc( - checkFunc, - resource.TestCheckResourceAttr( - "test_resource_nested_set.foo", "single.#", "1", - ), - // the hash of single seems to change here, so we're not - // going to test for "value" directly - // FIXME: figure out why the set hash changes - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_nested_set.foo", "single.#", "0", - ), - checkFunc, - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - single { - value = "bar" - } -} - `), - Check: checkFunc, - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - single { - value = "bar" - optional = "baz" - } -} - `), - Check: checkFunc, - }, - - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { -} - `), - Check: checkFunc, - }, 
- }, - }) -} -func TestResourceNestedSet_multiAddRemove(t *testing.T) { - checkFunc := func(s *terraform.State) error { - return nil - } - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { -} - `), - Check: checkFunc, - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - multi { - optional = "bar" - } -} - `), - Check: checkFunc, - }, - - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { -} - `), - Check: checkFunc, - }, - - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - multi { - set { - required = "val" - } - } -} - `), - Check: checkFunc, - }, - - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - multi { - set { - required = "new" - } - } -} - `), - Check: checkFunc, - }, - - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - multi { - set { - required = "new" - optional_int = 3 - } - } -} - `), - Check: checkFunc, - }, - - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - single { - value = "bar" - optional = "baz" - } - multi { - set { - required = "new" - optional_int = 3 - } - } -} - `), - Check: checkFunc, - }, - - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - optional = true - single { - value = "bar" - optional = "baz" - } - multi { - set { - required = "new" - optional_int = 3 - } - } -} - `), - Check: checkFunc, - }, - }, - }) -} - -func TestResourceNestedSet_forceNewEmptyString(t *testing.T) { - var id string - step := 0 - checkFunc := func(s *terraform.State) error { - root := s.ModuleByPath(addrs.RootModuleInstance) - res := 
root.Resources["test_resource_nested_set.foo"] - defer func() { - step++ - id = res.Primary.ID - }() - - if step == 2 && res.Primary.ID == id { - // setting an empty string currently does not trigger ForceNew, but - // it should in the future. - return nil - } - - if res.Primary.ID == id { - return errors.New("expected new resource") - } - - return nil - } - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - multi { - set { - required = "val" - } - } -} - `), - Check: checkFunc, - }, - - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - multi { - set { - required = "" - } - } -} - `), - Check: checkFunc, - }, - - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - force_new = "" -} - `), - Check: checkFunc, - }, - }, - }) -} - -func TestResourceNestedSet_setWithList(t *testing.T) { - checkFunc := func(s *terraform.State) error { - return nil - } - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - with_list { - required = "bar" - list = ["initial value"] - } -} - `), - Check: checkFunc, - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - with_list { - required = "bar" - list = ["second value"] - } -} - `), - Check: checkFunc, - }, - }, - }) -} - -// This is the same as forceNewEmptyString, but we start with the empty value, -// instead of changing it. 
-func TestResourceNestedSet_nestedSetEmptyString(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - multi { - set { - required = "" - } - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_nested_set.foo", "multi.529860700.set.4196279896.required", "", - ), - ), - }, - }, - }) -} - -func TestResourceNestedSet_emptySet(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - multi { - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_nested_set.foo", "multi.#", "1", - ), - ), - }, - }, - }) -} - -func TestResourceNestedSet_multipleUnknownSetElements(t *testing.T) { - checkFunc := func(s *terraform.State) error { - return nil - } - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "a" { -} - -resource "test_resource_nested_set" "b" { -} - -resource "test_resource_nested_set" "c" { - multi { - optional = test_resource_nested_set.a.id - } - multi { - optional = test_resource_nested_set.b.id - } -} - `), - Check: checkFunc, - }, - }, - }) -} - -func TestResourceNestedSet_interpolationChanges(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "foo" { - single { - value = "x" - } -} 
-resource "test_resource_nested_set" "bar" { - single { - value = test_resource_nested_set.foo.id - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_nested_set.foo", "single.#", "1", - ), - resource.TestCheckResourceAttr( - "test_resource_nested_set.bar", "single.#", "1", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested_set" "baz" { - single { - value = "x" - } -} -resource "test_resource_nested_set" "bar" { - single { - value = test_resource_nested_set.baz.id - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_nested_set.baz", "single.#", "1", - ), - resource.TestCheckResourceAttr( - "test_resource_nested_set.bar", "single.#", "1", - ), - ), - }, - }, - }) -} - -func TestResourceNestedSet_dynamicSetBlock(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "a" { - required = "ok" - required_map = { - a = "b" - } -} - -resource "test_resource_nested_set" "foo" { - dynamic "with_list" { - iterator = thing - for_each = test_resource.a.computed_list - content { - required = thing.value - list = [thing.key] - } - } -} - `), - Check: resource.ComposeTestCheckFunc(), - }, - }, - }) -} diff --git a/builtin/providers/test/resource_nested_test.go b/builtin/providers/test/resource_nested_test.go deleted file mode 100644 index c525f625c..000000000 --- a/builtin/providers/test/resource_nested_test.go +++ /dev/null @@ -1,217 +0,0 @@ -package test - -import ( - "errors" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceNested_basic(t *testing.T) { - resource.UnitTest(t, 
resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested" "foo" { - nested { - string = "val" - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource_nested.foo", "nested.#", "1", - ), - resource.TestCheckResourceAttr( - "test_resource_nested.foo", "nested.1877647874.string", "val", - ), - resource.TestCheckResourceAttr( - "test_resource_nested.foo", "list_block.0.sub_list_block.0.bool", "false", - ), - ), - }, - }, - }) -} - -func TestResourceNested_addRemove(t *testing.T) { - var id string - idCheck := func(s *terraform.State) error { - root := s.ModuleByPath(addrs.RootModuleInstance) - res := root.Resources["test_resource_nested.foo"] - if res.Primary.ID == id { - return errors.New("expected new resource") - } - id = res.Primary.ID - return nil - } - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested" "foo" { -} - `), - Check: resource.ComposeTestCheckFunc( - idCheck, - resource.TestCheckResourceAttr( - "test_resource_nested.foo", "nested.#", "0", - ), - // Checking for a count of 0 and a nonexistent count should - // now be the same operation. 
- resource.TestCheckNoResourceAttr( - "test_resource_nested.foo", "nested.#", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested" "foo" { - nested { - string = "val" - } -} - `), - Check: resource.ComposeTestCheckFunc( - idCheck, - resource.TestCheckResourceAttr( - "test_resource_nested.foo", "nested.1877647874.string", "val", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested" "foo" { - optional = true - nested { - string = "val" - } -} - `), - Check: resource.ComposeTestCheckFunc( - idCheck, - resource.TestCheckResourceAttr( - "test_resource_nested.foo", "nested.1877647874.string", "val", - ), - resource.TestCheckResourceAttr( - "test_resource_nested.foo", "optional", "true", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested" "foo" { - nested { - string = "val" - } -} - `), - Check: resource.ComposeTestCheckFunc( - idCheck, - resource.TestCheckResourceAttr( - "test_resource_nested.foo", "nested.1877647874.string", "val", - ), - resource.TestCheckNoResourceAttr( - "test_resource_nested.foo", "optional", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested" "foo" { - nested { - string = "val" - optional = true - } -} - `), - Check: resource.ComposeTestCheckFunc( - idCheck, - resource.TestCheckResourceAttr( - "test_resource_nested.foo", "nested.2994502535.string", "val", - ), - resource.TestCheckResourceAttr( - "test_resource_nested.foo", "nested.2994502535.optional", "true", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested" "foo" { -} - `), - Check: resource.ComposeTestCheckFunc( - idCheck, - resource.TestCheckNoResourceAttr( - "test_resource_nested.foo", "nested.#", - ), - ), - }, - }, - }) -} - -func TestResourceNested_dynamic(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: 
testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested" "foo" { - dynamic "nested" { - for_each = [["a"], []] - content { - string = join(",", nested.value) - optional = false - dynamic "nested_again" { - for_each = nested.value - content { - string = nested_again.value - } - } - } - } -} - `), - Check: func(s *terraform.State) error { - rs, ok := s.RootModule().Resources["test_resource_nested.foo"] - if !ok { - return errors.New("missing resource in state") - } - - got := rs.Primary.Attributes - want := map[string]string{ - "nested.#": "2", - "nested.33842314.string": "a", - "nested.33842314.optional": "false", - "nested.33842314.nested_again.#": "1", - "nested.33842314.nested_again.936590934.string": "a", - "nested.140280279.string": "", - "nested.140280279.optional": "false", - "nested.140280279.nested_again.#": "0", - "list_block.#": "1", - "list_block.0.sub_list_block.#": "1", - "list_block.0.sub_list_block.0.bool": "false", - "list_block.0.sub_list_block.0.set.#": "0", - } - delete(got, "id") // it's random, so not useful for testing - - if !cmp.Equal(got, want) { - return errors.New("wrong result\n" + cmp.Diff(want, got)) - } - - return nil - }, - }, - }, - }) -} diff --git a/builtin/providers/test/resource_provider_meta.go b/builtin/providers/test/resource_provider_meta.go deleted file mode 100644 index c05adb170..000000000 --- a/builtin/providers/test/resource_provider_meta.go +++ /dev/null @@ -1,95 +0,0 @@ -package test - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceProviderMeta() *schema.Resource { - return &schema.Resource{ - Create: testResourceProviderMetaCreate, - Read: testResourceProviderMetaRead, - Update: testResourceProviderMetaUpdate, - Delete: testResourceProviderMetaDelete, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "optional": { 
- Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -type providerMeta struct { - Foo string `cty:"foo"` -} - -func testResourceProviderMetaCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId("testId") - var m providerMeta - - err := d.GetProviderMeta(&m) - if err != nil { - return err - } - - if m.Foo != "bar" { - return fmt.Errorf("expected provider_meta.foo to be %q, was %q", - "bar", m.Foo) - } - - return testResourceProviderMetaRead(d, meta) -} - -func testResourceProviderMetaRead(d *schema.ResourceData, meta interface{}) error { - var m providerMeta - - err := d.GetProviderMeta(&m) - if err != nil { - return err - } - - if m.Foo != "bar" { - return fmt.Errorf("expected provider_meta.foo to be %q, was %q", - "bar", m.Foo) - } - - return nil -} - -func testResourceProviderMetaUpdate(d *schema.ResourceData, meta interface{}) error { - var m providerMeta - - err := d.GetProviderMeta(&m) - if err != nil { - return err - } - - if m.Foo != "bar" { - return fmt.Errorf("expected provider_meta.foo to be %q, was %q", - "bar", m.Foo) - } - return testResourceProviderMetaRead(d, meta) -} - -func testResourceProviderMetaDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - var m providerMeta - - err := d.GetProviderMeta(&m) - if err != nil { - return err - } - - if m.Foo != "bar" { - return fmt.Errorf("expected provider_meta.foo to be %q, was %q", - "bar", m.Foo) - } - return nil -} diff --git a/builtin/providers/test/resource_provider_meta_test.go b/builtin/providers/test/resource_provider_meta_test.go deleted file mode 100644 index 3b92d0a40..000000000 --- a/builtin/providers/test/resource_provider_meta_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package test - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestResourceProviderMeta_basic(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: 
[]resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -terraform { - provider_meta "test" { - foo = "bar" - } -} - -resource "test_resource_provider_meta" "foo" { -} - `), - }, - }, - }) -} diff --git a/builtin/providers/test/resource_required_min.go b/builtin/providers/test/resource_required_min.go deleted file mode 100644 index 413d4c51d..000000000 --- a/builtin/providers/test/resource_required_min.go +++ /dev/null @@ -1,68 +0,0 @@ -package test - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceRequiredMin() *schema.Resource { - return &schema.Resource{ - Create: testResourceRequiredMinCreate, - Read: testResourceRequiredMinRead, - Update: testResourceRequiredMinUpdate, - Delete: testResourceRequiredMinDelete, - - CustomizeDiff: func(d *schema.ResourceDiff, _ interface{}) error { - if d.HasChange("dependent_list") { - d.SetNewComputed("computed_list") - } - return nil - }, - - Schema: map[string]*schema.Schema{ - "min_items": { - Type: schema.TypeList, - Optional: true, - MinItems: 2, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "val": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "required_min_items": { - Type: schema.TypeList, - Required: true, - MinItems: 2, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "val": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - } -} - -func testResourceRequiredMinCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId("testId") - return testResourceRequiredMinRead(d, meta) -} - -func testResourceRequiredMinRead(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func testResourceRequiredMinUpdate(d *schema.ResourceData, meta interface{}) error { - return testResourceRequiredMinRead(d, meta) -} - -func testResourceRequiredMinDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git 
a/builtin/providers/test/resource_required_min_test.go b/builtin/providers/test/resource_required_min_test.go deleted file mode 100644 index 180c28ccf..000000000 --- a/builtin/providers/test/resource_required_min_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package test - -import ( - "regexp" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestResource_dynamicRequiredMinItems(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: ` -resource "test_resource_required_min" "a" { -} -`, - ExpectError: regexp.MustCompile(`"required_min_items" blocks are required`), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "a" { - dependent_list { - val = "a" - } -} - -resource "test_resource_required_min" "b" { - dynamic "required_min_items" { - for_each = test_resource_list.a.computed_list - content { - val = required_min_items.value - } - } -} - `), - ExpectError: regexp.MustCompile(`required_min_items: attribute supports 2 item as a minimum`), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_list" "c" { - dependent_list { - val = "a" - } - - dependent_list { - val = "b" - } -} - -resource "test_resource_required_min" "b" { - dynamic "required_min_items" { - for_each = test_resource_list.c.computed_list - content { - val = required_min_items.value - } - } -} - `), - }, - }, - }) -} diff --git a/builtin/providers/test/resource_signal.go b/builtin/providers/test/resource_signal.go deleted file mode 100644 index 57e4bf0eb..000000000 --- a/builtin/providers/test/resource_signal.go +++ /dev/null @@ -1,43 +0,0 @@ -package test - -import ( - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceSignal() *schema.Resource { - return &schema.Resource{ - Create: testResourceSignalCreate, - Read: testResourceSignalRead, - Update: 
testResourceSignalUpdate, - Delete: testResourceSignalDelete, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "optional": { - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func testResourceSignalCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId("testId") - - return testResourceSignalRead(d, meta) -} - -func testResourceSignalRead(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func testResourceSignalUpdate(d *schema.ResourceData, meta interface{}) error { - return testResourceSignalRead(d, meta) -} - -func testResourceSignalDelete(d *schema.ResourceData, meta interface{}) error { - return nil -} diff --git a/builtin/providers/test/resource_state_func.go b/builtin/providers/test/resource_state_func.go deleted file mode 100644 index 609e5ea53..000000000 --- a/builtin/providers/test/resource_state_func.go +++ /dev/null @@ -1,118 +0,0 @@ -package test - -import ( - "crypto/sha1" - "encoding/hex" - "fmt" - "math/rand" - - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceStateFunc() *schema.Resource { - return &schema.Resource{ - Create: testResourceStateFuncCreate, - Read: testResourceStateFuncRead, - Update: testResourceStateFuncUpdate, - Delete: testResourceStateFuncDelete, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "optional": { - Type: schema.TypeString, - Optional: true, - }, - "state_func": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - StateFunc: stateFuncHash, - }, - "state_func_value": { - Type: schema.TypeString, - Optional: true, - }, - - // set block with computed elements - "set_block": { - Type: schema.TypeSet, - Optional: true, - Set: setBlockHash, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "required": { - Type: schema.TypeString, 
- Required: true, - }, - "optional": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - }, - }, - }, - }, - } -} - -func stateFuncHash(v interface{}) string { - hash := sha1.Sum([]byte(v.(string))) - return hex.EncodeToString(hash[:]) -} - -func setBlockHash(v interface{}) int { - m := v.(map[string]interface{}) - required, _ := m["required"].(string) - optional, _ := m["optional"].(string) - return hashcode.String(fmt.Sprintf("%s|%s", required, optional)) -} - -func testResourceStateFuncCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId(fmt.Sprintf("%x", rand.Int63())) - - // if we have a reference for the actual data in the state_func field, - // compare it - if data, ok := d.GetOk("state_func_value"); ok { - expected := data.(string) - got := d.Get("state_func").(string) - if expected != got { - return fmt.Errorf("expected state_func value:%q, got%q", expected, got) - } - } - - // Check that we can lookup set elements by our computed hash. - // This is not advised, but we can use this to make sure the final diff was - // prepared with the correct values. 
- setBlock, ok := d.GetOk("set_block") - if ok { - set := setBlock.(*schema.Set) - for _, obj := range set.List() { - idx := setBlockHash(obj) - requiredAddr := fmt.Sprintf("%s.%d.%s", "set_block", idx, "required") - _, ok := d.GetOkExists(requiredAddr) - if !ok { - return fmt.Errorf("failed to get attr %q from %#v", fmt.Sprintf(requiredAddr), d.State().Attributes) - } - } - } - - return testResourceStateFuncRead(d, meta) -} - -func testResourceStateFuncRead(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func testResourceStateFuncUpdate(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func testResourceStateFuncDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git a/builtin/providers/test/resource_state_func_test.go b/builtin/providers/test/resource_state_func_test.go deleted file mode 100644 index cf5726eea..000000000 --- a/builtin/providers/test/resource_state_func_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package test - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestResourceStateFunc_basic(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_state_func" "foo" { -} - `), - Check: resource.TestCheckNoResourceAttr("test_resource_state_func.foo", "state_func"), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_state_func" "foo" { - state_func = "data" - state_func_value = "data" -} - `), - Check: resource.TestCheckResourceAttr("test_resource_state_func.foo", "state_func", stateFuncHash("data")), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_state_func" "foo" { -} - `), - Check: resource.TestCheckNoResourceAttr("test_resource_state_func.foo", "state_func"), - }, - resource.TestStep{ 
- Config: strings.TrimSpace(` -resource "test_resource_state_func" "foo" { - optional = "added" - state_func = "data" - state_func_value = "data" -} - `), - Check: resource.TestCheckResourceAttr("test_resource_state_func.foo", "state_func", stateFuncHash("data")), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_state_func" "foo" { - optional = "added" - state_func = "changed" - state_func_value = "changed" -} - `), - Check: resource.TestCheckResourceAttr("test_resource_state_func.foo", "state_func", stateFuncHash("changed")), - }, - }, - }) -} - -func TestResourceStateFunc_getOkSetElem(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_state_func" "foo" { -} - -resource "test_resource_state_func" "bar" { - set_block { - required = "foo" - optional = test_resource_state_func.foo.id - } - set_block { - required = test_resource_state_func.foo.id - } -} - `), - }, - }, - }) -} diff --git a/builtin/providers/test/resource_test.go b/builtin/providers/test/resource_test.go deleted file mode 100644 index 510b7816c..000000000 --- a/builtin/providers/test/resource_test.go +++ /dev/null @@ -1,1220 +0,0 @@ -package test - -import ( - "reflect" - "regexp" - "strings" - "testing" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func TestResource_basic(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } -} - `), - Check: resource.ComposeTestCheckFunc( - 
resource.TestCheckNoResourceAttr( - "test_resource.foo", "list.#", - ), - ), - }, - }, - }) -} - -func TestResource_changedList(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - { - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckNoResourceAttr( - "test_resource.foo", "list.#", - ), - ), - }, - { - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - list = ["a"] -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource.foo", "list.#", "1", - ), - resource.TestCheckResourceAttr( - "test_resource.foo", "list.0", "a", - ), - ), - }, - { - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - list = ["a", "b"] -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource.foo", "list.#", "2", - ), - resource.TestCheckResourceAttr( - "test_resource.foo", "list.0", "a", - ), - resource.TestCheckResourceAttr( - "test_resource.foo", "list.1", "b", - ), - ), - }, - { - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - list = ["b"] -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource.foo", "list.#", "1", - ), - resource.TestCheckResourceAttr( - "test_resource.foo", "list.0", "b", - ), - ), - }, - }, - }) -} - -// Targeted test in TestContext2Apply_ignoreChangesCreate -func TestResource_ignoreChangesRequired(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - 
Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - lifecycle { - ignore_changes = ["required"] - } -} - `), - Check: func(s *terraform.State) error { - return nil - }, - }, - }, - }) -} - -func TestResource_ignoreChangesEmpty(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional_force_new = "one" - lifecycle { - ignore_changes = [] - } -} - `), - Check: func(s *terraform.State) error { - return nil - }, - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional_force_new = "two" - lifecycle { - ignore_changes = [] - } -} - `), - Check: func(s *terraform.State) error { - return nil - }, - }, - }, - }) -} - -func TestResource_ignoreChangesForceNew(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional_force_new = "one" - lifecycle { - ignore_changes = ["optional_force_new"] - } -} - `), - Check: func(s *terraform.State) error { - return nil - }, - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional_force_new = "two" - lifecycle { - ignore_changes = ["optional_force_new"] - } -} - `), - Check: func(s *terraform.State) error { - return nil - }, - }, - }, - }) -} - -// Covers specific scenario in #6005, handled by normalizing boolean strings in -// helper/schema -func 
TestResource_ignoreChangesForceNewBoolean(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional_force_new = "one" - optional_bool = true - lifecycle { - ignore_changes = ["optional_force_new"] - } -} - `), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional_force_new = "two" - optional_bool = true - lifecycle { - ignore_changes = ["optional_force_new"] - } -} - `), - Check: func(s *terraform.State) error { - return nil - }, - }, - }, - }) -} - -func TestResource_ignoreChangesMap(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional_computed_map = { - foo = "bar" - } - lifecycle { - ignore_changes = ["optional_computed_map"] - } -} - `), - Check: func(s *terraform.State) error { - return nil - }, - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional_computed_map = { - foo = "bar" - no = "update" - } - lifecycle { - ignore_changes = ["optional_computed_map"] - } -} - `), - Check: func(s *terraform.State) error { - return nil - }, - }, - }, - }) -} - -func TestResource_ignoreChangesDependent(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - 
count = 2 - required = "yep" - required_map = { - key = "value" - } - - optional_force_new = "one" - lifecycle { - ignore_changes = ["optional_force_new"] - } -} -resource "test_resource" "bar" { - count = 2 - required = "yep" - required_map = { - key = "value" - } - optional = "${element(test_resource.foo.*.id, count.index)}" -} - `), - Check: func(s *terraform.State) error { - return nil - }, - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - count = 2 - required = "yep" - required_map = { - key = "value" - } - - optional_force_new = "two" - lifecycle { - ignore_changes = ["optional_force_new"] - } -} -resource "test_resource" "bar" { - count = 2 - required = "yep" - required_map = { - key = "value" - } - optional = "${element(test_resource.foo.*.id, count.index)}" -} - `), - Check: func(s *terraform.State) error { - return nil - }, - }, - }, - }) -} - -func TestResource_ignoreChangesStillReplaced(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional_force_new = "one" - optional_bool = true - lifecycle { - ignore_changes = ["optional_bool"] - } -} - `), - Check: func(s *terraform.State) error { - return nil - }, - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional_force_new = "two" - optional_bool = false - lifecycle { - ignore_changes = ["optional_bool"] - } -} - `), - Check: func(s *terraform.State) error { - return nil - }, - }, - }, - }) -} - -func TestResource_ignoreChangesCustomizeDiff(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - 
resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional = "a" - lifecycle { - ignore_changes = [optional] - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource.foo", "planned_computed", "a", - ), - ), - }, - // On this step, `optional` changes, but `planned_computed` - // should remain as "a" because we have set `ignore_changes` - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional = "b" - lifecycle { - ignore_changes = [optional] - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource.foo", "planned_computed", "a", - ), - ), - }, - }, - }) -} - -// Reproduces plan-time panic when the wrong type is interpolated in a list of -// maps. -// TODO: this should return a type error, rather than silently setting an empty -// list -func TestResource_dataSourceListMapPanic(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "val" - required_map = {x = "y"} - list_of_map = "${var.maplist}" -} - -variable "maplist" { - type = "list" - - default = [ - {a = "b"} - ] -} - `), - ExpectError: nil, - Check: func(s *terraform.State) error { - return nil - }, - }, - }, - }) -} - -func TestResource_dataSourceIndexMapList(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "val" - - required_map = { - x = "y" - } - - list_of_map = [ - { - a = "1" - b = "2" - }, - { - c = "3" - d = "4" - 
}, - ] -} - -output "map_from_list" { - value = "${test_resource.foo.list_of_map[0]}" -} - -output "value_from_map_from_list" { - value = "${lookup(test_resource.foo.list_of_map[1], "d")}" -} - `), - ExpectError: nil, - Check: func(s *terraform.State) error { - root := s.ModuleByPath(addrs.RootModuleInstance) - mapOut := root.Outputs["map_from_list"].Value - expectedMapOut := map[string]interface{}{ - "a": "1", - "b": "2", - } - - valueOut := root.Outputs["value_from_map_from_list"].Value - expectedValueOut := "4" - - if !reflect.DeepEqual(mapOut, expectedMapOut) { - t.Fatalf("Expected: %#v\nGot: %#v", expectedMapOut, mapOut) - } - if !reflect.DeepEqual(valueOut, expectedValueOut) { - t.Fatalf("Expected: %#v\nGot: %#v", valueOut, expectedValueOut) - } - return nil - }, - }, - }, - }) -} - -func testAccCheckResourceDestroy(s *terraform.State) error { - return nil -} - -func TestResource_removeForceNew(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional_force_new = "here" -} - `), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } -} - `), - }, - }, - }) -} - -func TestResource_unknownFuncInMap(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "ok" - required_map = { - key = "${uuid()}" - } -} - `), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -// Verify that we can destroy when a managed resource references something with -// a count of 1. 
-func TestResource_countRefDestroyError(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: strings.TrimSpace(` -resource "test_resource" "one" { - count = 1 - required = "ok" - required_map = { - key = "val" - } -} - -resource "test_resource" "two" { - required = test_resource.one[0].id - required_map = { - key = "val" - } -} - `), - }, - }, - }) -} - -func TestResource_emptyMapValue(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "ok" - required_map = { - a = "a" - b = "" - } -} - `), - }, - }, - }) -} - -func TestResource_updateError(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "first" - required_map = { - a = "a" - } -} -`), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "second" - required_map = { - a = "a" - } - apply_error = "update_error" -} -`), - ExpectError: regexp.MustCompile("update_error"), - }, - }, - }) -} - -func TestResource_applyError(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "second" - required_map = { - a = "a" - } - apply_error = "apply_error" -} -`), - ExpectError: regexp.MustCompile("apply_error"), - }, - }, - }) -} - -func TestResource_emptyStrings(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: 
testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "second" - required_map = { - a = "a" - } - - list = [""] -} -`), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource.foo", "list.0", ""), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "second" - required_map = { - a = "a" - } - - list = ["", "b"] -} -`), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource.foo", "list.0", ""), - resource.TestCheckResourceAttr("test_resource.foo", "list.1", "b"), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "second" - required_map = { - a = "a" - } - - list = [""] -} -`), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource.foo", "list.0", ""), - ), - }, - }, - }) -} - -func TestResource_setDrift(t *testing.T) { - testProvider := testAccProviders["test"] - res := testProvider.(*schema.Provider).ResourcesMap["test_resource"] - - // reset the Read function after the test - defer func() { - res.Read = testResourceRead - }() - - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "first" - required_map = { - a = "a" - } - set = ["a", "b"] -} -`), - Check: func(s *terraform.State) error { - return nil - }, - }, - resource.TestStep{ - PreConfig: func() { - // update the Read function to return the wrong "set" attribute values. 
- res.Read = func(d *schema.ResourceData, meta interface{}) error { - // update as expected first - if err := testResourceRead(d, meta); err != nil { - return err - } - d.Set("set", []interface{}{"a", "x"}) - return nil - } - }, - // Leave the config, so we can detect the mismatched set values. - // Updating the config would force the test to pass even if the Read - // function values were ignored. - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "second" - required_map = { - a = "a" - } - set = ["a", "b"] -} -`), - ExpectNonEmptyPlan: true, - }, - }, - }) -} - -func TestResource_optionalComputedMap(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional_computed_map = { - foo = "bar" - baz = "" - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource.foo", "optional_computed_map.foo", "bar", - ), - resource.TestCheckResourceAttr( - "test_resource.foo", "optional_computed_map.baz", "", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional_computed_map = {} -} - `), - // removing the map from the config should still leave an empty computed map - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource.foo", "optional_computed_map.%", "0", - ), - ), - }, - }, - }) -} - -func TestResource_plannedComputed(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "ok" - required_map = { - key = 
"value" - } - optional = "hi" -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource.foo", "planned_computed", "hi", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "ok" - required_map = { - key = "value" - } - optional = "changed" -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource.foo", "planned_computed", "changed", - ), - ), - }, - }, - }) -} - -func TestDiffApply_map(t *testing.T) { - resSchema := map[string]*schema.Schema{ - "map": { - Type: schema.TypeMap, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - } - - priorAttrs := map[string]string{ - "id": "ok", - "map.%": "2", - "map.foo": "bar", - "map.bar": "", - } - - diff := &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "map.foo": &terraform.ResourceAttrDiff{Old: "bar", New: "", NewRemoved: true}, - "map.bar": &terraform.ResourceAttrDiff{Old: "", New: "", NewRemoved: true}, - }, - } - - newAttrs, err := diff.Apply(priorAttrs, (&schema.Resource{Schema: resSchema}).CoreConfigSchema()) - if err != nil { - t.Fatal(err) - } - - expect := map[string]string{ - "id": "ok", - "map.%": "0", - } - - if !reflect.DeepEqual(newAttrs, expect) { - t.Fatalf("expected:%#v got:%#v", expect, newAttrs) - } -} - -func TestResource_dependsComputed(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -variable "change" { - default = false -} - -resource "test_resource" "foo" { - required = "ok" - required_map = { - key = "value" - } - optional = var.change ? "after" : "" -} - -resource "test_resource" "bar" { - count = var.change ? 
1 : 0 - required = test_resource.foo.planned_computed - required_map = { - key = "value" - } -} - `), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -variable "change" { - default = true -} - -resource "test_resource" "foo" { - required = "ok" - required_map = { - key = "value" - } - optional = var.change ? "after" : "" -} - -resource "test_resource" "bar" { - count = var.change ? 1 : 0 - required = test_resource.foo.planned_computed - required_map = { - key = "value" - } -} - `), - }, - }, - }) -} - -func TestResource_optionalComputedBool(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } -} - `), - }, - }, - }) -} - -func TestResource_replacedOptionalComputed(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested" "a" { -} - -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional_computed = test_resource_nested.a.id -} - `), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_nested" "b" { -} - -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional_computed = test_resource_nested.b.id -} - `), - }, - }, - }) -} - -func TestResource_floatInIntAttr(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - int = 40.2 -} - `), - ExpectError: 
regexp.MustCompile(`must be a whole number, got 40.2`), - }, - }, - }) -} - -func TestResource_unsetNil(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - optional = "a" -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource.foo", "optional", "a"), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource.foo", "optional", ""), - ), - }, - }, - }) -} - -// Verify we can use use numeric indices in `ignore_changes` paths. -func TestResource_ignoreChangesIndex(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - list_of_map = [ - { - a = "b" - } - ] - - lifecycle { - ignore_changes = [list_of_map[0]["a"]] - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource.foo", "list_of_map.0.a", "b", - ), - ), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - list_of_map = [ - { - a = "c" - } - ] - - lifecycle { - ignore_changes = [list_of_map[0]["a"]] - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource.foo", "list_of_map.0.a", "b", - ), - ), - }, - // set ignore_changes to a prefix of the changed value - resource.TestStep{ - Config: 
strings.TrimSpace(` -resource "test_resource" "foo" { - required = "yep" - required_map = { - key = "value" - } - list_of_map = [ - { - a = "d" - } - ] - - lifecycle { - ignore_changes = [list_of_map[0]] - } -} - `), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "test_resource.foo", "list_of_map.0.a", "b", - ), - ), - }, - }, - }) -} diff --git a/builtin/providers/test/resource_timeout.go b/builtin/providers/test/resource_timeout.go deleted file mode 100644 index a10717550..000000000 --- a/builtin/providers/test/resource_timeout.go +++ /dev/null @@ -1,125 +0,0 @@ -package test - -import ( - "fmt" - "time" - - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceTimeout() *schema.Resource { - return &schema.Resource{ - Create: testResourceTimeoutCreate, - Read: testResourceTimeoutRead, - Update: testResourceTimeoutUpdate, - Delete: testResourceTimeoutDelete, - - // Due to the schema version also being stashed in the private/meta - // data, we need to ensure that it does not overwrite the map - // containing the timeouts. 
- SchemaVersion: 1, - - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(time.Second), - Update: schema.DefaultTimeout(time.Second), - Delete: schema.DefaultTimeout(time.Second), - }, - - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - - Schema: map[string]*schema.Schema{ - "create_delay": { - Type: schema.TypeString, - Optional: true, - }, - "read_delay": { - Type: schema.TypeString, - Optional: true, - }, - "update_delay": { - Type: schema.TypeString, - Optional: true, - }, - "delete_delay": { - Type: schema.TypeString, - Optional: true, - }, - }, - } -} - -func testResourceTimeoutCreate(d *schema.ResourceData, meta interface{}) error { - delayString := d.Get("create_delay").(string) - var delay time.Duration - var err error - if delayString != "" { - delay, err = time.ParseDuration(delayString) - if err != nil { - return err - } - } - - if delay > d.Timeout(schema.TimeoutCreate) { - return fmt.Errorf("timeout while creating resource") - } - - d.SetId("testId") - - return testResourceRead(d, meta) -} - -func testResourceTimeoutRead(d *schema.ResourceData, meta interface{}) error { - delayString := d.Get("read_delay").(string) - var delay time.Duration - var err error - if delayString != "" { - delay, err = time.ParseDuration(delayString) - if err != nil { - return err - } - } - - if delay > d.Timeout(schema.TimeoutRead) { - return fmt.Errorf("timeout while reading resource") - } - - return nil -} - -func testResourceTimeoutUpdate(d *schema.ResourceData, meta interface{}) error { - delayString := d.Get("update_delay").(string) - var delay time.Duration - var err error - if delayString != "" { - delay, err = time.ParseDuration(delayString) - if err != nil { - return err - } - } - - if delay > d.Timeout(schema.TimeoutUpdate) { - return fmt.Errorf("timeout while updating resource") - } - return nil -} - -func testResourceTimeoutDelete(d *schema.ResourceData, meta interface{}) error { - delayString := 
d.Get("delete_delay").(string) - var delay time.Duration - var err error - if delayString != "" { - delay, err = time.ParseDuration(delayString) - if err != nil { - return err - } - } - - if delay > d.Timeout(schema.TimeoutDelete) { - return fmt.Errorf("timeout while deleting resource") - } - - d.SetId("") - return nil -} diff --git a/builtin/providers/test/resource_timeout_test.go b/builtin/providers/test/resource_timeout_test.go deleted file mode 100644 index 312a37a78..000000000 --- a/builtin/providers/test/resource_timeout_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package test - -import ( - "regexp" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestResourceTimeout_create(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_timeout" "foo" { - create_delay = "2s" - timeouts { - create = "1s" - } -} - `), - ExpectError: regexp.MustCompile("timeout while creating resource"), - }, - }, - }) -} - -// start with the default, then modify it -func TestResourceTimeout_defaults(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_timeout" "foo" { - update_delay = "1ms" -} - `), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_timeout" "foo" { - update_delay = "2ms" - timeouts { - update = "3s" - } -} - `), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_timeout" "foo" { - update_delay = "2s" - delete_delay = "2s" - timeouts { - delete = "3s" - update = "3s" - } -} - `), - }, - // delete "foo" - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_timeout" "bar" { -} - `), - }, - 
}, - }) -} - -func TestResourceTimeout_delete(t *testing.T) { - // If the delete timeout isn't saved until destroy, the cleanup here will - // fail because the default is only 20m. - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_timeout" "foo" { - delete_delay = "25m" - timeouts { - delete = "30m" - } -} - `), - }, - }, - }) -} -func TestResourceTimeout_update(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_timeout" "foo" { - update_delay = "1s" - timeouts { - update = "1s" - } -} - `), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_timeout" "foo" { - update_delay = "2s" - timeouts { - update = "1s" - } -} - `), - ExpectError: regexp.MustCompile("timeout while updating resource"), - }, - }, - }) -} - -func TestResourceTimeout_read(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - CheckDestroy: testAccCheckResourceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_timeout" "foo" { -} - `), - }, - resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_timeout" "foo" { - read_delay = "30m" -} - `), - ExpectError: regexp.MustCompile("timeout while reading resource"), - }, - // we need to remove the read_delay so that the resource can be - // destroyed in the final step, but expect an error here from the - // pre-existing delay. 
- resource.TestStep{ - Config: strings.TrimSpace(` -resource "test_resource_timeout" "foo" { -} - `), - ExpectError: regexp.MustCompile("timeout while reading resource"), - }, - }, - }) -} diff --git a/builtin/providers/test/resource_undeletable.go b/builtin/providers/test/resource_undeletable.go deleted file mode 100644 index e5c9bb3b0..000000000 --- a/builtin/providers/test/resource_undeletable.go +++ /dev/null @@ -1,30 +0,0 @@ -package test - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceUndeleteable() *schema.Resource { - return &schema.Resource{ - Create: testResourceUndeleteableCreate, - Read: testResourceUndeleteableRead, - Delete: testResourceUndeleteableDelete, - - Schema: map[string]*schema.Schema{}, - } -} - -func testResourceUndeleteableCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId("placeholder") - return testResourceUndeleteableRead(d, meta) -} - -func testResourceUndeleteableRead(d *schema.ResourceData, meta interface{}) error { - return nil -} - -func testResourceUndeleteableDelete(d *schema.ResourceData, meta interface{}) error { - return fmt.Errorf("test_undeleteable always fails deletion (use terraform state rm if you really want to delete it)") -} diff --git a/builtin/providers/test/resource_with_custom_diff.go b/builtin/providers/test/resource_with_custom_diff.go deleted file mode 100644 index 10756548c..000000000 --- a/builtin/providers/test/resource_with_custom_diff.go +++ /dev/null @@ -1,154 +0,0 @@ -package test - -import ( - "fmt" - - "github.com/hashicorp/terraform/helper/schema" -) - -func testResourceCustomDiff() *schema.Resource { - return &schema.Resource{ - Create: testResourceCustomDiffCreate, - Read: testResourceCustomDiffRead, - CustomizeDiff: testResourceCustomDiffCustomizeDiff, - Update: testResourceCustomDiffUpdate, - Delete: testResourceCustomDiffDelete, - Schema: map[string]*schema.Schema{ - "required": { - Type: schema.TypeString, - Required: true, - }, - 
"computed": { - Type: schema.TypeInt, - Computed: true, - }, - "index": { - Type: schema.TypeInt, - Computed: true, - }, - "veto": { - Type: schema.TypeBool, - Optional: true, - }, - "list": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - }, - } -} - -type listDiffCases struct { - Type string - Value string -} - -func testListDiffCases(index int) []listDiffCases { - switch index { - case 0: - return []listDiffCases{ - { - Type: "add", - Value: "dc1", - }, - } - case 1: - return []listDiffCases{ - { - Type: "remove", - Value: "dc1", - }, - { - Type: "add", - Value: "dc2", - }, - { - Type: "add", - Value: "dc3", - }, - } - } - return nil -} - -func testListDiffCasesReadResult(index int) []interface{} { - switch index { - case 1: - return []interface{}{"dc1"} - default: - return []interface{}{"dc2", "dc3"} - } -} - -func testResourceCustomDiffCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId("testId") - - // Required must make it through to Create - if _, ok := d.GetOk("required"); !ok { - return fmt.Errorf("missing attribute 'required', but it's required") - } - - _, new := d.GetChange("computed") - expected := new.(int) - 1 - actual := d.Get("index").(int) - if expected != actual { - return fmt.Errorf("expected computed to be 1 ahead of index, got computed: %d, index: %d", expected, actual) - } - d.Set("index", new) - - return testResourceCustomDiffRead(d, meta) -} - -func testResourceCustomDiffRead(d *schema.ResourceData, meta interface{}) error { - if err := d.Set("list", testListDiffCasesReadResult(d.Get("index").(int))); err != nil { - return err - } - return nil -} - -func testResourceCustomDiffCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error { - if d.Get("veto").(bool) == true { - return fmt.Errorf("veto is true, diff vetoed") - } - // Note that this gets put into state after the update, regardless of whether - // or not anything is acted upon in the diff. 
- d.SetNew("computed", d.Get("computed").(int)+1) - - // This tests a diffed list, based off of the value of index - dcs := testListDiffCases(d.Get("index").(int)) - s := d.Get("list").([]interface{}) - for _, dc := range dcs { - switch dc.Type { - case "add": - s = append(s, dc.Value) - case "remove": - for i := range s { - if s[i].(string) == dc.Value { - copy(s[i:], s[i+1:]) - s = s[:len(s)-1] - break - } - } - } - } - d.SetNew("list", s) - - return nil -} - -func testResourceCustomDiffUpdate(d *schema.ResourceData, meta interface{}) error { - _, new := d.GetChange("computed") - expected := new.(int) - 1 - actual := d.Get("index").(int) - if expected != actual { - return fmt.Errorf("expected computed to be 1 ahead of index, got computed: %d, index: %d", expected, actual) - } - d.Set("index", new) - return testResourceCustomDiffRead(d, meta) -} - -func testResourceCustomDiffDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} diff --git a/builtin/providers/test/resource_with_custom_diff_test.go b/builtin/providers/test/resource_with_custom_diff_test.go deleted file mode 100644 index 05982bec9..000000000 --- a/builtin/providers/test/resource_with_custom_diff_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package test - -import ( - "fmt" - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -// TestResourceWithCustomDiff test custom diff behaviour. 
-func TestResourceWithCustomDiff(t *testing.T) { - resource.UnitTest(t, resource.TestCase{ - Providers: testAccProviders, - Steps: []resource.TestStep{ - { - Config: resourceWithCustomDiffConfig(false), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_with_custom_diff.foo", "computed", "1"), - resource.TestCheckResourceAttr("test_resource_with_custom_diff.foo", "index", "1"), - resource.TestCheckResourceAttr("test_resource_with_custom_diff.foo", "list.#", "1"), - resource.TestCheckResourceAttr("test_resource_with_custom_diff.foo", "list.0", "dc1"), - ), - ExpectNonEmptyPlan: true, - }, - { - Config: resourceWithCustomDiffConfig(false), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("test_resource_with_custom_diff.foo", "computed", "2"), - resource.TestCheckResourceAttr("test_resource_with_custom_diff.foo", "index", "2"), - resource.TestCheckResourceAttr("test_resource_with_custom_diff.foo", "list.#", "2"), - resource.TestCheckResourceAttr("test_resource_with_custom_diff.foo", "list.0", "dc2"), - resource.TestCheckResourceAttr("test_resource_with_custom_diff.foo", "list.1", "dc3"), - resource.TestCheckNoResourceAttr("test_resource_with_custom_diff.foo", "list.2"), - ), - ExpectNonEmptyPlan: true, - }, - { - Config: resourceWithCustomDiffConfig(true), - ExpectError: regexp.MustCompile("veto is true, diff vetoed"), - }, - }, - }) -} - -func resourceWithCustomDiffConfig(veto bool) string { - return fmt.Sprintf(` -resource "test_resource_with_custom_diff" "foo" { - required = "yep" - veto = %t -} -`, veto) -} diff --git a/builtin/provisioners/chef/linux_provisioner.go b/builtin/provisioners/chef/linux_provisioner.go deleted file mode 100644 index 399967700..000000000 --- a/builtin/provisioners/chef/linux_provisioner.go +++ /dev/null @@ -1,115 +0,0 @@ -package chef - -import ( - "fmt" - "path" - "strings" - - "github.com/hashicorp/terraform/communicator" - "github.com/hashicorp/terraform/terraform" -) - 
-const ( - chmod = "find %s -maxdepth 1 -type f -exec /bin/chmod %d {} +" - installURL = "https://omnitruck.chef.io/install.sh" -) - -func (p *provisioner) linuxInstallChefClient(o terraform.UIOutput, comm communicator.Communicator) error { - // Build up the command prefix - prefix := "" - if p.HTTPProxy != "" { - prefix += fmt.Sprintf("http_proxy='%s' ", p.HTTPProxy) - } - if p.HTTPSProxy != "" { - prefix += fmt.Sprintf("https_proxy='%s' ", p.HTTPSProxy) - } - if len(p.NOProxy) > 0 { - prefix += fmt.Sprintf("no_proxy='%s' ", strings.Join(p.NOProxy, ",")) - } - - // First download the install.sh script from Chef - err := p.runCommand(o, comm, fmt.Sprintf("%scurl -LO %s", prefix, installURL)) - if err != nil { - return err - } - - // Then execute the install.sh scrip to download and install Chef Client - err = p.runCommand(o, comm, fmt.Sprintf("%sbash ./install.sh -v %q -c %s", prefix, p.Version, p.Channel)) - if err != nil { - return err - } - - // And finally cleanup the install.sh script again - return p.runCommand(o, comm, fmt.Sprintf("%srm -f install.sh", prefix)) -} - -func (p *provisioner) linuxCreateConfigFiles(o terraform.UIOutput, comm communicator.Communicator) error { - // Make sure the config directory exists - if err := p.runCommand(o, comm, "mkdir -p "+linuxConfDir); err != nil { - return err - } - - // Make sure we have enough rights to upload the files if using sudo - if p.useSudo { - if err := p.runCommand(o, comm, "chmod 777 "+linuxConfDir); err != nil { - return err - } - if err := p.runCommand(o, comm, fmt.Sprintf(chmod, linuxConfDir, 666)); err != nil { - return err - } - } - - if err := p.deployConfigFiles(o, comm, linuxConfDir); err != nil { - return err - } - - if len(p.OhaiHints) > 0 { - // Make sure the hits directory exists - hintsDir := path.Join(linuxConfDir, "ohai/hints") - if err := p.runCommand(o, comm, "mkdir -p "+hintsDir); err != nil { - return err - } - - // Make sure we have enough rights to upload the hints if using sudo - if 
p.useSudo { - if err := p.runCommand(o, comm, "chmod 777 "+hintsDir); err != nil { - return err - } - if err := p.runCommand(o, comm, fmt.Sprintf(chmod, hintsDir, 666)); err != nil { - return err - } - } - - if err := p.deployOhaiHints(o, comm, hintsDir); err != nil { - return err - } - - // When done copying the hints restore the rights and make sure root is owner - if p.useSudo { - if err := p.runCommand(o, comm, "chmod 755 "+hintsDir); err != nil { - return err - } - if err := p.runCommand(o, comm, fmt.Sprintf(chmod, hintsDir, 600)); err != nil { - return err - } - if err := p.runCommand(o, comm, "chown -R root:root "+hintsDir); err != nil { - return err - } - } - } - - // When done copying all files restore the rights and make sure root is owner - if p.useSudo { - if err := p.runCommand(o, comm, "chmod 755 "+linuxConfDir); err != nil { - return err - } - if err := p.runCommand(o, comm, fmt.Sprintf(chmod, linuxConfDir, 600)); err != nil { - return err - } - if err := p.runCommand(o, comm, "chown -R root:root "+linuxConfDir); err != nil { - return err - } - } - - return nil -} diff --git a/builtin/provisioners/chef/linux_provisioner_test.go b/builtin/provisioners/chef/linux_provisioner_test.go deleted file mode 100644 index 6bf43b57a..000000000 --- a/builtin/provisioners/chef/linux_provisioner_test.go +++ /dev/null @@ -1,330 +0,0 @@ -package chef - -import ( - "fmt" - "path" - "testing" - - "github.com/hashicorp/terraform/communicator" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceProvider_linuxInstallChefClient(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - Commands map[string]bool - }{ - "Sudo": { - Config: map[string]interface{}{ - "node_name": "nodename1", - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - Commands: map[string]bool{ - "sudo curl -LO 
https://omnitruck.chef.io/install.sh": true, - "sudo bash ./install.sh -v \"\" -c stable": true, - "sudo rm -f install.sh": true, - }, - }, - - "NoSudo": { - Config: map[string]interface{}{ - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "secret_key": "SECRET-KEY", - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - Commands: map[string]bool{ - "curl -LO https://omnitruck.chef.io/install.sh": true, - "bash ./install.sh -v \"\" -c stable": true, - "rm -f install.sh": true, - }, - }, - - "HTTPProxy": { - Config: map[string]interface{}{ - "http_proxy": "http://proxy.local", - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - Commands: map[string]bool{ - "http_proxy='http://proxy.local' curl -LO https://omnitruck.chef.io/install.sh": true, - "http_proxy='http://proxy.local' bash ./install.sh -v \"\" -c stable": true, - "http_proxy='http://proxy.local' rm -f install.sh": true, - }, - }, - - "HTTPSProxy": { - Config: map[string]interface{}{ - "https_proxy": "https://proxy.local", - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - Commands: map[string]bool{ - "https_proxy='https://proxy.local' curl -LO https://omnitruck.chef.io/install.sh": true, - "https_proxy='https://proxy.local' bash ./install.sh -v \"\" -c stable": true, - "https_proxy='https://proxy.local' rm -f install.sh": true, - }, - }, - - "NoProxy": { - Config: map[string]interface{}{ - "http_proxy": "http://proxy.local", - "no_proxy": []interface{}{"http://local.local", "http://local.org"}, - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - 
"user_name": "bob", - "user_key": "USER-KEY", - }, - - Commands: map[string]bool{ - "http_proxy='http://proxy.local' no_proxy='http://local.local,http://local.org' " + - "curl -LO https://omnitruck.chef.io/install.sh": true, - "http_proxy='http://proxy.local' no_proxy='http://local.local,http://local.org' " + - "bash ./install.sh -v \"\" -c stable": true, - "http_proxy='http://proxy.local' no_proxy='http://local.local,http://local.org' " + - "rm -f install.sh": true, - }, - }, - - "Version": { - Config: map[string]interface{}{ - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - "version": "11.18.6", - }, - - Commands: map[string]bool{ - "curl -LO https://omnitruck.chef.io/install.sh": true, - "bash ./install.sh -v \"11.18.6\" -c stable": true, - "rm -f install.sh": true, - }, - }, - - "Channel": { - Config: map[string]interface{}{ - "channel": "current", - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - "version": "11.18.6", - }, - - Commands: map[string]bool{ - "curl -LO https://omnitruck.chef.io/install.sh": true, - "bash ./install.sh -v \"11.18.6\" -c current": true, - "rm -f install.sh": true, - }, - }, - } - - o := new(terraform.MockUIOutput) - c := new(communicator.MockCommunicator) - - for k, tc := range cases { - c.Commands = tc.Commands - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - p.useSudo = !p.PreventSudo - - err = p.linuxInstallChefClient(o, c) - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } -} - -func TestResourceProvider_linuxCreateConfigFiles(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - Commands 
map[string]bool - Uploads map[string]string - }{ - "Sudo": { - Config: map[string]interface{}{ - "ohai_hints": []interface{}{"testdata/ohaihint.json"}, - "node_name": "nodename1", - "run_list": []interface{}{"cookbook::recipe"}, - "secret_key": "SECRET-KEY", - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - Commands: map[string]bool{ - "sudo mkdir -p " + linuxConfDir: true, - "sudo chmod 777 " + linuxConfDir: true, - "sudo " + fmt.Sprintf(chmod, linuxConfDir, 666): true, - "sudo mkdir -p " + path.Join(linuxConfDir, "ohai/hints"): true, - "sudo chmod 777 " + path.Join(linuxConfDir, "ohai/hints"): true, - "sudo " + fmt.Sprintf(chmod, path.Join(linuxConfDir, "ohai/hints"), 666): true, - "sudo chmod 755 " + path.Join(linuxConfDir, "ohai/hints"): true, - "sudo " + fmt.Sprintf(chmod, path.Join(linuxConfDir, "ohai/hints"), 600): true, - "sudo chown -R root:root " + path.Join(linuxConfDir, "ohai/hints"): true, - "sudo chmod 755 " + linuxConfDir: true, - "sudo " + fmt.Sprintf(chmod, linuxConfDir, 600): true, - "sudo chown -R root:root " + linuxConfDir: true, - }, - - Uploads: map[string]string{ - linuxConfDir + "/client.rb": defaultLinuxClientConf, - linuxConfDir + "/encrypted_data_bag_secret": "SECRET-KEY", - linuxConfDir + "/first-boot.json": `{"run_list":["cookbook::recipe"]}`, - linuxConfDir + "/ohai/hints/ohaihint.json": "OHAI-HINT-FILE", - linuxConfDir + "/bob.pem": "USER-KEY", - }, - }, - - "NoSudo": { - Config: map[string]interface{}{ - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "secret_key": "SECRET-KEY", - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - Commands: map[string]bool{ - "mkdir -p " + linuxConfDir: true, - }, - - Uploads: map[string]string{ - linuxConfDir + "/client.rb": defaultLinuxClientConf, - linuxConfDir + "/encrypted_data_bag_secret": "SECRET-KEY", - linuxConfDir + "/first-boot.json": 
`{"run_list":["cookbook::recipe"]}`, - linuxConfDir + "/bob.pem": "USER-KEY", - }, - }, - - "Proxy": { - Config: map[string]interface{}{ - "http_proxy": "http://proxy.local", - "https_proxy": "https://proxy.local", - "no_proxy": []interface{}{"http://local.local", "https://local.local"}, - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "secret_key": "SECRET-KEY", - "server_url": "https://chef.local", - "ssl_verify_mode": "verify_none", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - Commands: map[string]bool{ - "mkdir -p " + linuxConfDir: true, - }, - - Uploads: map[string]string{ - linuxConfDir + "/client.rb": proxyLinuxClientConf, - linuxConfDir + "/encrypted_data_bag_secret": "SECRET-KEY", - linuxConfDir + "/first-boot.json": `{"run_list":["cookbook::recipe"]}`, - linuxConfDir + "/bob.pem": "USER-KEY", - }, - }, - - "Attributes JSON": { - Config: map[string]interface{}{ - "attributes_json": `{"key1":{"subkey1":{"subkey2a":["val1","val2","val3"],` + - `"subkey2b":{"subkey3":"value3"}}},"key2":"value2"}`, - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "secret_key": "SECRET-KEY", - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - Commands: map[string]bool{ - "mkdir -p " + linuxConfDir: true, - }, - - Uploads: map[string]string{ - linuxConfDir + "/client.rb": defaultLinuxClientConf, - linuxConfDir + "/encrypted_data_bag_secret": "SECRET-KEY", - linuxConfDir + "/bob.pem": "USER-KEY", - linuxConfDir + "/first-boot.json": `{"key1":{"subkey1":{"subkey2a":["val1","val2","val3"],` + - `"subkey2b":{"subkey3":"value3"}}},"key2":"value2","run_list":["cookbook::recipe"]}`, - }, - }, - } - - o := new(terraform.MockUIOutput) - c := new(communicator.MockCommunicator) - - for k, tc := range cases { - c.Commands = tc.Commands - c.Uploads = tc.Uploads - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, 
Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - p.useSudo = !p.PreventSudo - - err = p.linuxCreateConfigFiles(o, c) - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } -} - -const defaultLinuxClientConf = `log_location STDOUT -chef_server_url "https://chef.local/" -node_name "nodename1"` - -const proxyLinuxClientConf = `log_location STDOUT -chef_server_url "https://chef.local/" -node_name "nodename1" - -http_proxy "http://proxy.local" -ENV['http_proxy'] = "http://proxy.local" -ENV['HTTP_PROXY'] = "http://proxy.local" - -https_proxy "https://proxy.local" -ENV['https_proxy'] = "https://proxy.local" -ENV['HTTPS_PROXY'] = "https://proxy.local" - -no_proxy "http://local.local,https://local.local" -ENV['no_proxy'] = "http://local.local,https://local.local" - -ssl_verify_mode :verify_none` diff --git a/builtin/provisioners/chef/resource_provisioner.go b/builtin/provisioners/chef/resource_provisioner.go deleted file mode 100644 index 6f2f3ae52..000000000 --- a/builtin/provisioners/chef/resource_provisioner.go +++ /dev/null @@ -1,904 +0,0 @@ -package chef - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "log" - "os" - "path" - "regexp" - "strconv" - "strings" - "sync" - "text/template" - "time" - - "github.com/hashicorp/terraform/communicator" - "github.com/hashicorp/terraform/communicator/remote" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/go-homedir" - "github.com/mitchellh/go-linereader" -) - -const ( - clienrb = "client.rb" - defaultEnv = "_default" - firstBoot = "first-boot.json" - logfileDir = "logfiles" - linuxChefCmd = "chef-client" - linuxConfDir = "/etc/chef" - linuxNoOutput = "> /dev/null 2>&1" - linuxGemCmd = "/opt/chef/embedded/bin/gem" - linuxKnifeCmd = "knife" - secretKey = "encrypted_data_bag_secret" - windowsChefCmd = "cmd /c chef-client" - windowsConfDir = "C:/chef" - 
windowsNoOutput = "> nul 2>&1" - windowsGemCmd = "C:/opscode/chef/embedded/bin/gem" - windowsKnifeCmd = "cmd /c knife" -) - -const clientConf = ` -log_location STDOUT -chef_server_url "{{ .ServerURL }}" -node_name "{{ .NodeName }}" -{{ if .UsePolicyfile }} -use_policyfile true -policy_group "{{ .PolicyGroup }}" -policy_name "{{ .PolicyName }}" -{{ end -}} - -{{ if .HTTPProxy }} -http_proxy "{{ .HTTPProxy }}" -ENV['http_proxy'] = "{{ .HTTPProxy }}" -ENV['HTTP_PROXY'] = "{{ .HTTPProxy }}" -{{ end -}} - -{{ if .HTTPSProxy }} -https_proxy "{{ .HTTPSProxy }}" -ENV['https_proxy'] = "{{ .HTTPSProxy }}" -ENV['HTTPS_PROXY'] = "{{ .HTTPSProxy }}" -{{ end -}} - -{{ if .NOProxy }} -no_proxy "{{ join .NOProxy "," }}" -ENV['no_proxy'] = "{{ join .NOProxy "," }}" -{{ end -}} - -{{ if .SSLVerifyMode }} -ssl_verify_mode {{ .SSLVerifyMode }} -{{- end -}} - -{{ if .DisableReporting }} -enable_reporting false -{{ end -}} - -{{ if .ClientOptions }} -{{ join .ClientOptions "\n" }} -{{ end }} -` - -type provisionFn func(terraform.UIOutput, communicator.Communicator) error - -type provisioner struct { - Attributes map[string]interface{} - Channel string - ClientOptions []string - DisableReporting bool - Environment string - FetchChefCertificates bool - LogToFile bool - UsePolicyfile bool - PolicyGroup string - PolicyName string - HTTPProxy string - HTTPSProxy string - MaxRetries int - NamedRunList string - NOProxy []string - NodeName string - OhaiHints []string - OSType string - RecreateClient bool - PreventSudo bool - RetryOnExitCode map[int]bool - RunList []string - SecretKey string - ServerURL string - SkipInstall bool - SkipRegister bool - SSLVerifyMode string - UserName string - UserKey string - Vaults map[string][]string - Version string - WaitForRetry time.Duration - - cleanupUserKeyCmd string - createConfigFiles provisionFn - installChefClient provisionFn - fetchChefCertificates provisionFn - generateClientKey provisionFn - configureVaults provisionFn - runChefClient provisionFn - 
useSudo bool -} - -// Provisioner returns a Chef provisioner -func Provisioner() terraform.ResourceProvisioner { - return &schema.Provisioner{ - Schema: map[string]*schema.Schema{ - "node_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "server_url": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "user_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "user_key": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "attributes_json": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "channel": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "stable", - }, - "client_options": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - }, - "disable_reporting": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "environment": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: defaultEnv, - }, - "fetch_chef_certificates": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "log_to_file": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "use_policyfile": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "policy_group": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "policy_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "http_proxy": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "https_proxy": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "max_retries": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 0, - }, - "no_proxy": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - }, - "named_run_list": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "ohai_hints": &schema.Schema{ - Type: schema.TypeList, - Elem: 
&schema.Schema{Type: schema.TypeString}, - Optional: true, - }, - "os_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "prevent_sudo": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "recreate_client": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "retry_on_exit_code": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeInt}, - Optional: true, - }, - "run_list": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - }, - "secret_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "skip_install": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "skip_register": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "ssl_verify_mode": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "vault_json": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "version": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "wait_for_retry": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Default: 30, - }, - }, - - ApplyFunc: applyFn, - ValidateFunc: validateFn, - } -} - -// TODO: Support context cancelling (Provisioner Stop) -func applyFn(ctx context.Context) error { - o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) - s := ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState) - d := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) - - // Decode the provisioner config - p, err := decodeConfig(d) - if err != nil { - return err - } - - if p.OSType == "" { - switch t := s.Ephemeral.ConnInfo["type"]; t { - case "ssh", "": // The default connection type is ssh, so if the type is empty assume ssh - p.OSType = "linux" - case "winrm": - p.OSType = "windows" - default: - return fmt.Errorf("Unsupported connection type: %s", t) - } - } - - // Set some values based on the targeted OS - switch p.OSType { - case 
"linux": - p.cleanupUserKeyCmd = fmt.Sprintf("rm -f %s", path.Join(linuxConfDir, p.UserName+".pem")) - p.createConfigFiles = p.linuxCreateConfigFiles - p.installChefClient = p.linuxInstallChefClient - p.fetchChefCertificates = p.fetchChefCertificatesFunc(linuxKnifeCmd, linuxConfDir) - p.generateClientKey = p.generateClientKeyFunc(linuxKnifeCmd, linuxConfDir, linuxNoOutput) - p.configureVaults = p.configureVaultsFunc(linuxGemCmd, linuxKnifeCmd, linuxConfDir) - p.runChefClient = p.runChefClientFunc(linuxChefCmd, linuxConfDir) - p.useSudo = !p.PreventSudo && s.Ephemeral.ConnInfo["user"] != "root" - case "windows": - p.cleanupUserKeyCmd = fmt.Sprintf("cd %s && del /F /Q %s", windowsConfDir, p.UserName+".pem") - p.createConfigFiles = p.windowsCreateConfigFiles - p.installChefClient = p.windowsInstallChefClient - p.fetchChefCertificates = p.fetchChefCertificatesFunc(windowsKnifeCmd, windowsConfDir) - p.generateClientKey = p.generateClientKeyFunc(windowsKnifeCmd, windowsConfDir, windowsNoOutput) - p.configureVaults = p.configureVaultsFunc(windowsGemCmd, windowsKnifeCmd, windowsConfDir) - p.runChefClient = p.runChefClientFunc(windowsChefCmd, windowsConfDir) - p.useSudo = false - default: - return fmt.Errorf("Unsupported os type: %s", p.OSType) - } - - // Get a new communicator - comm, err := communicator.New(s) - if err != nil { - return err - } - - retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) - defer cancel() - - // Wait and retry until we establish the connection - err = communicator.Retry(retryCtx, func() error { - return comm.Connect(o) - }) - if err != nil { - return err - } - defer comm.Disconnect() - - // Make sure we always delete the user key from the new node! 
- var once sync.Once - cleanupUserKey := func() { - o.Output("Cleanup user key...") - if err := p.runCommand(o, comm, p.cleanupUserKeyCmd); err != nil { - o.Output("WARNING: Failed to cleanup user key on new node: " + err.Error()) - } - } - defer once.Do(cleanupUserKey) - - if !p.SkipInstall { - if err := p.installChefClient(o, comm); err != nil { - return err - } - } - - o.Output("Creating configuration files...") - if err := p.createConfigFiles(o, comm); err != nil { - return err - } - - if !p.SkipRegister { - if p.FetchChefCertificates { - o.Output("Fetch Chef certificates...") - if err := p.fetchChefCertificates(o, comm); err != nil { - return err - } - } - - o.Output("Generate the private key...") - if err := p.generateClientKey(o, comm); err != nil { - return err - } - } - - if p.Vaults != nil { - o.Output("Configure Chef vaults...") - if err := p.configureVaults(o, comm); err != nil { - return err - } - } - - // Cleanup the user key before we run Chef-Client to prevent issues - // with rights caused by changing settings during the run. - once.Do(cleanupUserKey) - - o.Output("Starting initial Chef-Client run...") - - for attempt := 0; attempt <= p.MaxRetries; attempt++ { - // We need a new retry context for each attempt, to make sure - // they all get the correct timeout. - retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) - defer cancel() - - // Make sure to (re)connect before trying to run Chef-Client. 
- if err := communicator.Retry(retryCtx, func() error { - return comm.Connect(o) - }); err != nil { - return err - } - - err = p.runChefClient(o, comm) - if err == nil { - return nil - } - - // Allow RFC062 Exit Codes: - // https://github.com/chef/chef-rfc/blob/master/rfc062-exit-status.md - exitError, ok := err.(*remote.ExitError) - if !ok { - return err - } - - switch exitError.ExitStatus { - case 35: - o.Output("Reboot has been scheduled in the run state") - err = nil - case 37: - o.Output("Reboot needs to be completed") - err = nil - case 213: - o.Output("Chef has exited during a client upgrade") - err = nil - } - - if !p.RetryOnExitCode[exitError.ExitStatus] { - return err - } - - if attempt < p.MaxRetries { - o.Output(fmt.Sprintf("Waiting %s before retrying Chef-Client run...", p.WaitForRetry)) - time.Sleep(p.WaitForRetry) - } - } - - return err -} - -func validateFn(c *terraform.ResourceConfig) (ws []string, es []error) { - usePolicyFile := false - if usePolicyFileRaw, ok := c.Get("use_policyfile"); ok { - switch usePolicyFileRaw := usePolicyFileRaw.(type) { - case bool: - usePolicyFile = usePolicyFileRaw - case string: - usePolicyFileBool, err := strconv.ParseBool(usePolicyFileRaw) - if err != nil { - return ws, append(es, errors.New("\"use_policyfile\" must be a boolean")) - } - usePolicyFile = usePolicyFileBool - default: - return ws, append(es, errors.New("\"use_policyfile\" must be a boolean")) - } - } - - if !usePolicyFile && !c.IsSet("run_list") { - es = append(es, errors.New("\"run_list\": required field is not set")) - } - if usePolicyFile && !c.IsSet("policy_name") { - es = append(es, errors.New("using policyfile, but \"policy_name\" not set")) - } - if usePolicyFile && !c.IsSet("policy_group") { - es = append(es, errors.New("using policyfile, but \"policy_group\" not set")) - } - - return ws, es -} - -func (p *provisioner) deployConfigFiles(o terraform.UIOutput, comm communicator.Communicator, confDir string) error { - // Copy the user key to the 
new instance - pk := strings.NewReader(p.UserKey) - if err := comm.Upload(path.Join(confDir, p.UserName+".pem"), pk); err != nil { - return fmt.Errorf("Uploading user key failed: %v", err) - } - - if p.SecretKey != "" { - // Copy the secret key to the new instance - s := strings.NewReader(p.SecretKey) - if err := comm.Upload(path.Join(confDir, secretKey), s); err != nil { - return fmt.Errorf("Uploading %s failed: %v", secretKey, err) - } - } - - // Make sure the SSLVerifyMode value is written as a symbol - if p.SSLVerifyMode != "" && !strings.HasPrefix(p.SSLVerifyMode, ":") { - p.SSLVerifyMode = fmt.Sprintf(":%s", p.SSLVerifyMode) - } - - // Make strings.Join available for use within the template - funcMap := template.FuncMap{ - "join": strings.Join, - } - - // Create a new template and parse the client config into it - t := template.Must(template.New(clienrb).Funcs(funcMap).Parse(clientConf)) - - var buf bytes.Buffer - err := t.Execute(&buf, p) - if err != nil { - return fmt.Errorf("Error executing %s template: %s", clienrb, err) - } - - // Copy the client config to the new instance - if err = comm.Upload(path.Join(confDir, clienrb), &buf); err != nil { - return fmt.Errorf("Uploading %s failed: %v", clienrb, err) - } - - // Create a map with first boot settings - fb := make(map[string]interface{}) - if p.Attributes != nil { - fb = p.Attributes - } - - // Check if the run_list was also in the attributes and if so log a warning - // that it will be overwritten with the value of the run_list argument. - if _, found := fb["run_list"]; found { - log.Printf("[WARN] Found a 'run_list' specified in the configured attributes! 
" + - "This value will be overwritten by the value of the `run_list` argument!") - } - - // Add the initial runlist to the first boot settings - if !p.UsePolicyfile { - fb["run_list"] = p.RunList - } - - // Marshal the first boot settings to JSON - d, err := json.Marshal(fb) - if err != nil { - return fmt.Errorf("Failed to create %s data: %s", firstBoot, err) - } - - // Copy the first-boot.json to the new instance - if err := comm.Upload(path.Join(confDir, firstBoot), bytes.NewReader(d)); err != nil { - return fmt.Errorf("Uploading %s failed: %v", firstBoot, err) - } - - return nil -} - -func (p *provisioner) deployOhaiHints(o terraform.UIOutput, comm communicator.Communicator, hintDir string) error { - for _, hint := range p.OhaiHints { - // Open the hint file - f, err := os.Open(hint) - if err != nil { - return err - } - defer f.Close() - - // Copy the hint to the new instance - if err := comm.Upload(path.Join(hintDir, path.Base(hint)), f); err != nil { - return fmt.Errorf("Uploading %s failed: %v", path.Base(hint), err) - } - } - - return nil -} - -func (p *provisioner) fetchChefCertificatesFunc( - knifeCmd string, - confDir string) func(terraform.UIOutput, communicator.Communicator) error { - return func(o terraform.UIOutput, comm communicator.Communicator) error { - clientrb := path.Join(confDir, clienrb) - cmd := fmt.Sprintf("%s ssl fetch -c %s", knifeCmd, clientrb) - - return p.runCommand(o, comm, cmd) - } -} - -func (p *provisioner) generateClientKeyFunc(knifeCmd string, confDir string, noOutput string) provisionFn { - return func(o terraform.UIOutput, comm communicator.Communicator) error { - options := fmt.Sprintf("-c %s -u %s --key %s", - path.Join(confDir, clienrb), - p.UserName, - path.Join(confDir, p.UserName+".pem"), - ) - - // See if we already have a node object - getNodeCmd := fmt.Sprintf("%s node show %s %s %s", knifeCmd, p.NodeName, options, noOutput) - node := p.runCommand(o, comm, getNodeCmd) == nil - - // See if we already have a client 
object - getClientCmd := fmt.Sprintf("%s client show %s %s %s", knifeCmd, p.NodeName, options, noOutput) - client := p.runCommand(o, comm, getClientCmd) == nil - - // If we have a client, we can only continue if we are to recreate the client - if client && !p.RecreateClient { - return fmt.Errorf( - "Chef client %q already exists, set recreate_client=true to automatically recreate the client", p.NodeName) - } - - // If the node exists, try to delete it - if node { - deleteNodeCmd := fmt.Sprintf("%s node delete %s -y %s", - knifeCmd, - p.NodeName, - options, - ) - if err := p.runCommand(o, comm, deleteNodeCmd); err != nil { - return err - } - } - - // If the client exists, try to delete it - if client { - deleteClientCmd := fmt.Sprintf("%s client delete %s -y %s", - knifeCmd, - p.NodeName, - options, - ) - if err := p.runCommand(o, comm, deleteClientCmd); err != nil { - return err - } - } - - // Create the new client object - createClientCmd := fmt.Sprintf("%s client create %s -d -f %s %s", - knifeCmd, - p.NodeName, - path.Join(confDir, "client.pem"), - options, - ) - - return p.runCommand(o, comm, createClientCmd) - } -} - -func (p *provisioner) configureVaultsFunc(gemCmd string, knifeCmd string, confDir string) provisionFn { - return func(o terraform.UIOutput, comm communicator.Communicator) error { - if err := p.runCommand(o, comm, fmt.Sprintf("%s install chef-vault", gemCmd)); err != nil { - return err - } - - options := fmt.Sprintf("-c %s -u %s --key %s", - path.Join(confDir, clienrb), - p.UserName, - path.Join(confDir, p.UserName+".pem"), - ) - - // if client gets recreated, remove (old) client (with old keys) from vaults/items - // otherwise, the (new) client (with new keys) will not be able to decrypt the vault - if p.RecreateClient { - for vault, items := range p.Vaults { - for _, item := range items { - deleteCmd := fmt.Sprintf("%s vault remove %s %s -C \"%s\" -M client %s", - knifeCmd, - vault, - item, - p.NodeName, - options, - ) - if err := 
p.runCommand(o, comm, deleteCmd); err != nil { - return err - } - } - } - } - - for vault, items := range p.Vaults { - for _, item := range items { - updateCmd := fmt.Sprintf("%s vault update %s %s -C %s -M client %s", - knifeCmd, - vault, - item, - p.NodeName, - options, - ) - if err := p.runCommand(o, comm, updateCmd); err != nil { - return err - } - } - } - - return nil - } -} - -func (p *provisioner) runChefClientFunc(chefCmd string, confDir string) provisionFn { - return func(o terraform.UIOutput, comm communicator.Communicator) error { - fb := path.Join(confDir, firstBoot) - var cmd string - - // Policyfiles do not support chef environments, so don't pass the `-E` flag. - switch { - case p.UsePolicyfile && p.NamedRunList == "": - cmd = fmt.Sprintf("%s -j %q", chefCmd, fb) - case p.UsePolicyfile && p.NamedRunList != "": - cmd = fmt.Sprintf("%s -j %q -n %q", chefCmd, fb, p.NamedRunList) - default: - cmd = fmt.Sprintf("%s -j %q -E %q", chefCmd, fb, p.Environment) - } - - if p.LogToFile { - if err := os.MkdirAll(logfileDir, 0755); err != nil { - return fmt.Errorf("Error creating logfile directory %s: %v", logfileDir, err) - } - - logFile := path.Join(logfileDir, p.NodeName) - f, err := os.Create(path.Join(logFile)) - if err != nil { - return fmt.Errorf("Error creating logfile %s: %v", logFile, err) - } - f.Close() - - o.Output("Writing Chef Client output to " + logFile) - o = p - } - - return p.runCommand(o, comm, cmd) - } -} - -// Output implementation of terraform.UIOutput interface -func (p *provisioner) Output(output string) { - logFile := path.Join(logfileDir, p.NodeName) - f, err := os.OpenFile(logFile, os.O_APPEND|os.O_WRONLY, 0666) - if err != nil { - log.Printf("Error creating logfile %s: %v", logFile, err) - return - } - defer f.Close() - - // These steps are needed to remove any ANSI escape codes used to colorize - // the output and to make sure we have proper line endings before writing - // the string to the logfile. 
- re := regexp.MustCompile(`\x1b\[[0-9;]+m`) - output = re.ReplaceAllString(output, "") - output = strings.Replace(output, "\r", "\n", -1) - - if _, err := f.WriteString(output); err != nil { - log.Printf("Error writing output to logfile %s: %v", logFile, err) - } - - if err := f.Sync(); err != nil { - log.Printf("Error saving logfile %s to disk: %v", logFile, err) - } -} - -// runCommand is used to run already prepared commands -func (p *provisioner) runCommand(o terraform.UIOutput, comm communicator.Communicator, command string) error { - // Unless prevented, prefix the command with sudo - if p.useSudo { - command = "sudo " + command - } - - outR, outW := io.Pipe() - errR, errW := io.Pipe() - go p.copyOutput(o, outR) - go p.copyOutput(o, errR) - defer outW.Close() - defer errW.Close() - - cmd := &remote.Cmd{ - Command: command, - Stdout: outW, - Stderr: errW, - } - - err := comm.Start(cmd) - if err != nil { - return fmt.Errorf("Error executing command %q: %v", cmd.Command, err) - } - - if err := cmd.Wait(); err != nil { - return err - } - - return nil -} - -func (p *provisioner) copyOutput(o terraform.UIOutput, r io.Reader) { - lr := linereader.New(r) - for line := range lr.Ch { - o.Output(line) - } -} - -func decodeConfig(d *schema.ResourceData) (*provisioner, error) { - p := &provisioner{ - Channel: d.Get("channel").(string), - ClientOptions: getStringList(d.Get("client_options")), - DisableReporting: d.Get("disable_reporting").(bool), - Environment: d.Get("environment").(string), - FetchChefCertificates: d.Get("fetch_chef_certificates").(bool), - LogToFile: d.Get("log_to_file").(bool), - UsePolicyfile: d.Get("use_policyfile").(bool), - PolicyGroup: d.Get("policy_group").(string), - PolicyName: d.Get("policy_name").(string), - HTTPProxy: d.Get("http_proxy").(string), - HTTPSProxy: d.Get("https_proxy").(string), - NOProxy: getStringList(d.Get("no_proxy")), - MaxRetries: d.Get("max_retries").(int), - NamedRunList: d.Get("named_run_list").(string), - NodeName: 
d.Get("node_name").(string), - OhaiHints: getStringList(d.Get("ohai_hints")), - OSType: d.Get("os_type").(string), - RecreateClient: d.Get("recreate_client").(bool), - PreventSudo: d.Get("prevent_sudo").(bool), - RetryOnExitCode: getRetryOnExitCodes(d), - RunList: getStringList(d.Get("run_list")), - SecretKey: d.Get("secret_key").(string), - ServerURL: d.Get("server_url").(string), - SkipInstall: d.Get("skip_install").(bool), - SkipRegister: d.Get("skip_register").(bool), - SSLVerifyMode: d.Get("ssl_verify_mode").(string), - UserName: d.Get("user_name").(string), - UserKey: d.Get("user_key").(string), - Version: d.Get("version").(string), - WaitForRetry: time.Duration(d.Get("wait_for_retry").(int)) * time.Second, - } - - // Make sure the supplied URL has a trailing slash - p.ServerURL = strings.TrimSuffix(p.ServerURL, "/") + "/" - - for i, hint := range p.OhaiHints { - hintPath, err := homedir.Expand(hint) - if err != nil { - return nil, fmt.Errorf("Error expanding the path %s: %v", hint, err) - } - p.OhaiHints[i] = hintPath - } - - if attrs, ok := d.GetOk("attributes_json"); ok { - var m map[string]interface{} - if err := json.Unmarshal([]byte(attrs.(string)), &m); err != nil { - return nil, fmt.Errorf("Error parsing attributes_json: %v", err) - } - p.Attributes = m - } - - if vaults, ok := d.GetOk("vault_json"); ok { - var m map[string]interface{} - if err := json.Unmarshal([]byte(vaults.(string)), &m); err != nil { - return nil, fmt.Errorf("Error parsing vault_json: %v", err) - } - - v := make(map[string][]string) - for vault, items := range m { - switch items := items.(type) { - case []interface{}: - for _, item := range items { - if item, ok := item.(string); ok { - v[vault] = append(v[vault], item) - } - } - case interface{}: - if item, ok := items.(string); ok { - v[vault] = append(v[vault], item) - } - } - } - - p.Vaults = v - } - - return p, nil -} - -func getRetryOnExitCodes(d *schema.ResourceData) map[int]bool { - result := make(map[int]bool) - - v, ok 
:= d.GetOk("retry_on_exit_code") - if !ok || v == nil { - // Use default exit codes - result[35] = true - result[37] = true - result[213] = true - return result - } - - switch v := v.(type) { - case []interface{}: - for _, vv := range v { - if vv, ok := vv.(int); ok { - result[vv] = true - } - } - return result - default: - panic(fmt.Sprintf("Unsupported type: %T", v)) - } -} - -func getStringList(v interface{}) []string { - var result []string - - switch v := v.(type) { - case nil: - return result - case []interface{}: - for _, vv := range v { - if vv, ok := vv.(string); ok { - result = append(result, vv) - } - } - return result - default: - panic(fmt.Sprintf("Unsupported type: %T", v)) - } -} diff --git a/builtin/provisioners/chef/resource_provisioner_test.go b/builtin/provisioners/chef/resource_provisioner_test.go deleted file mode 100644 index bc9f24279..000000000 --- a/builtin/provisioners/chef/resource_provisioner_test.go +++ /dev/null @@ -1,435 +0,0 @@ -package chef - -import ( - "fmt" - "path" - "testing" - - "github.com/hashicorp/terraform/communicator" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceProvisioner_impl(t *testing.T) { - var _ terraform.ResourceProvisioner = Provisioner() -} - -func TestProvisioner(t *testing.T) { - if err := Provisioner().(*schema.Provisioner).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestResourceProvider_Validate_good(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "environment": "_default", - "node_name": "nodename1", - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) > 0 { - t.Fatalf("Errors: %v", errs) - } -} - -func TestResourceProvider_Validate_bad(t 
*testing.T) { - c := testConfig(t, map[string]interface{}{ - "invalid": "nope", - }) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) == 0 { - t.Fatalf("Should have errors") - } -} - -// Test that the JSON attributes with an unknown value don't -// validate. -func TestResourceProvider_Validate_computedValues(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "environment": "_default", - "node_name": "nodename1", - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - "attributes_json": hcl2shim.UnknownVariableValue, - }) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) > 0 { - t.Fatalf("Errors: %v", errs) - } -} - -func TestResourceProvider_runChefClient(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - ChefCmd string - ConfDir string - Commands map[string]bool - }{ - "Sudo": { - Config: map[string]interface{}{ - "node_name": "nodename1", - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - ChefCmd: linuxChefCmd, - - ConfDir: linuxConfDir, - - Commands: map[string]bool{ - fmt.Sprintf(`sudo %s -j %q -E "_default"`, - linuxChefCmd, - path.Join(linuxConfDir, "first-boot.json")): true, - }, - }, - - "NoSudo": { - Config: map[string]interface{}{ - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - ChefCmd: linuxChefCmd, - - ConfDir: linuxConfDir, - - Commands: map[string]bool{ - fmt.Sprintf(`%s -j %q -E "_default"`, - linuxChefCmd, - path.Join(linuxConfDir, "first-boot.json")): true, - }, - }, - - "Environment": { - Config: map[string]interface{}{ - "environment": "production", - 
"node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - ChefCmd: windowsChefCmd, - - ConfDir: windowsConfDir, - - Commands: map[string]bool{ - fmt.Sprintf(`%s -j %q -E "production"`, - windowsChefCmd, - path.Join(windowsConfDir, "first-boot.json")): true, - }, - }, - } - - o := new(terraform.MockUIOutput) - c := new(communicator.MockCommunicator) - - for k, tc := range cases { - c.Commands = tc.Commands - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - p.runChefClient = p.runChefClientFunc(tc.ChefCmd, tc.ConfDir) - p.useSudo = !p.PreventSudo - - err = p.runChefClient(o, c) - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } -} - -func TestResourceProvider_fetchChefCertificates(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - KnifeCmd string - ConfDir string - Commands map[string]bool - }{ - "Sudo": { - Config: map[string]interface{}{ - "fetch_chef_certificates": true, - "node_name": "nodename1", - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - KnifeCmd: linuxKnifeCmd, - - ConfDir: linuxConfDir, - - Commands: map[string]bool{ - fmt.Sprintf(`sudo %s ssl fetch -c %s`, - linuxKnifeCmd, - path.Join(linuxConfDir, "client.rb")): true, - }, - }, - - "NoSudo": { - Config: map[string]interface{}{ - "fetch_chef_certificates": true, - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - KnifeCmd: windowsKnifeCmd, - - ConfDir: windowsConfDir, - - Commands: map[string]bool{ - fmt.Sprintf(`%s ssl fetch -c %s`, - windowsKnifeCmd, - 
path.Join(windowsConfDir, "client.rb")): true, - }, - }, - } - - o := new(terraform.MockUIOutput) - c := new(communicator.MockCommunicator) - - for k, tc := range cases { - c.Commands = tc.Commands - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - p.fetchChefCertificates = p.fetchChefCertificatesFunc(tc.KnifeCmd, tc.ConfDir) - p.useSudo = !p.PreventSudo - - err = p.fetchChefCertificates(o, c) - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } -} - -func TestResourceProvider_configureVaults(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - GemCmd string - KnifeCmd string - ConfDir string - Commands map[string]bool - }{ - "Linux Vault string": { - Config: map[string]interface{}{ - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - "vault_json": `{"vault1": "item1"}`, - }, - - GemCmd: linuxGemCmd, - KnifeCmd: linuxKnifeCmd, - ConfDir: linuxConfDir, - - Commands: map[string]bool{ - fmt.Sprintf("%s install chef-vault", linuxGemCmd): true, - fmt.Sprintf("%s vault update vault1 item1 -C nodename1 -M client -c %s/client.rb "+ - "-u bob --key %s/bob.pem", linuxKnifeCmd, linuxConfDir, linuxConfDir): true, - }, - }, - - "Linux Vault []string": { - Config: map[string]interface{}{ - "fetch_chef_certificates": true, - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - "vault_json": `{"vault1": ["item1", "item2"]}`, - }, - - GemCmd: linuxGemCmd, - KnifeCmd: linuxKnifeCmd, - ConfDir: linuxConfDir, - - Commands: map[string]bool{ - fmt.Sprintf("%s install chef-vault", linuxGemCmd): true, - fmt.Sprintf("%s vault update vault1 item1 -C nodename1 -M 
client -c %s/client.rb "+ - "-u bob --key %s/bob.pem", linuxKnifeCmd, linuxConfDir, linuxConfDir): true, - fmt.Sprintf("%s vault update vault1 item2 -C nodename1 -M client -c %s/client.rb "+ - "-u bob --key %s/bob.pem", linuxKnifeCmd, linuxConfDir, linuxConfDir): true, - }, - }, - - "Linux Vault []string (recreate-client for vault)": { - Config: map[string]interface{}{ - "fetch_chef_certificates": true, - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - "vault_json": `{"vault1": ["item1", "item2"]}`, - "recreate_client": true, - }, - - GemCmd: linuxGemCmd, - KnifeCmd: linuxKnifeCmd, - ConfDir: linuxConfDir, - - Commands: map[string]bool{ - fmt.Sprintf("%s install chef-vault", linuxGemCmd): true, - fmt.Sprintf("%s vault remove vault1 item1 -C \"nodename1\" -M client -c %s/client.rb "+ - "-u bob --key %s/bob.pem", linuxKnifeCmd, linuxConfDir, linuxConfDir): true, - fmt.Sprintf("%s vault remove vault1 item2 -C \"nodename1\" -M client -c %s/client.rb "+ - "-u bob --key %s/bob.pem", linuxKnifeCmd, linuxConfDir, linuxConfDir): true, - fmt.Sprintf("%s vault update vault1 item1 -C nodename1 -M client -c %s/client.rb "+ - "-u bob --key %s/bob.pem", linuxKnifeCmd, linuxConfDir, linuxConfDir): true, - fmt.Sprintf("%s vault update vault1 item2 -C nodename1 -M client -c %s/client.rb "+ - "-u bob --key %s/bob.pem", linuxKnifeCmd, linuxConfDir, linuxConfDir): true, - }, - }, - - "Windows Vault string": { - Config: map[string]interface{}{ - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - "vault_json": `{"vault1": "item1"}`, - }, - - GemCmd: windowsGemCmd, - KnifeCmd: windowsKnifeCmd, - ConfDir: windowsConfDir, - - Commands: map[string]bool{ - fmt.Sprintf("%s install chef-vault", windowsGemCmd): true, - 
fmt.Sprintf("%s vault update vault1 item1 -C nodename1 -M client -c %s/client.rb "+ - "-u bob --key %s/bob.pem", windowsKnifeCmd, windowsConfDir, windowsConfDir): true, - }, - }, - - "Windows Vault []string": { - Config: map[string]interface{}{ - "fetch_chef_certificates": true, - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - "vault_json": `{"vault1": ["item1", "item2"]}`, - }, - - GemCmd: windowsGemCmd, - KnifeCmd: windowsKnifeCmd, - ConfDir: windowsConfDir, - - Commands: map[string]bool{ - fmt.Sprintf("%s install chef-vault", windowsGemCmd): true, - fmt.Sprintf("%s vault update vault1 item1 -C nodename1 -M client -c %s/client.rb "+ - "-u bob --key %s/bob.pem", windowsKnifeCmd, windowsConfDir, windowsConfDir): true, - fmt.Sprintf("%s vault update vault1 item2 -C nodename1 -M client -c %s/client.rb "+ - "-u bob --key %s/bob.pem", windowsKnifeCmd, windowsConfDir, windowsConfDir): true, - }, - }, - - "Windows Vault [] string (recreate-client for vault)": { - Config: map[string]interface{}{ - "fetch_chef_certificates": true, - "node_name": "nodename1", - "prevent_sudo": true, - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - "vault_json": `{"vault1": ["item1", "item2"]}`, - "recreate_client": true, - }, - - GemCmd: windowsGemCmd, - KnifeCmd: windowsKnifeCmd, - ConfDir: windowsConfDir, - - Commands: map[string]bool{ - fmt.Sprintf("%s install chef-vault", windowsGemCmd): true, - fmt.Sprintf("%s vault remove vault1 item1 -C \"nodename1\" -M client -c %s/client.rb "+ - "-u bob --key %s/bob.pem", windowsKnifeCmd, windowsConfDir, windowsConfDir): true, - fmt.Sprintf("%s vault remove vault1 item2 -C \"nodename1\" -M client -c %s/client.rb "+ - "-u bob --key %s/bob.pem", windowsKnifeCmd, windowsConfDir, windowsConfDir): true, - fmt.Sprintf("%s vault 
update vault1 item1 -C nodename1 -M client -c %s/client.rb "+ - "-u bob --key %s/bob.pem", windowsKnifeCmd, windowsConfDir, windowsConfDir): true, - fmt.Sprintf("%s vault update vault1 item2 -C nodename1 -M client -c %s/client.rb "+ - "-u bob --key %s/bob.pem", windowsKnifeCmd, windowsConfDir, windowsConfDir): true, - }, - }, - } - - o := new(terraform.MockUIOutput) - c := new(communicator.MockCommunicator) - - for k, tc := range cases { - c.Commands = tc.Commands - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - p.configureVaults = p.configureVaultsFunc(tc.GemCmd, tc.KnifeCmd, tc.ConfDir) - p.useSudo = !p.PreventSudo - - err = p.configureVaults(o, c) - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } -} - -func testConfig(t *testing.T, c map[string]interface{}) *terraform.ResourceConfig { - return terraform.NewResourceConfigRaw(c) -} diff --git a/builtin/provisioners/chef/testdata/ohaihint.json b/builtin/provisioners/chef/testdata/ohaihint.json deleted file mode 100644 index 9aabc3287..000000000 --- a/builtin/provisioners/chef/testdata/ohaihint.json +++ /dev/null @@ -1 +0,0 @@ -OHAI-HINT-FILE diff --git a/builtin/provisioners/chef/windows_provisioner.go b/builtin/provisioners/chef/windows_provisioner.go deleted file mode 100644 index 02010acdf..000000000 --- a/builtin/provisioners/chef/windows_provisioner.go +++ /dev/null @@ -1,84 +0,0 @@ -package chef - -import ( - "fmt" - "path" - "strings" - - "github.com/hashicorp/terraform/communicator" - "github.com/hashicorp/terraform/terraform" -) - -const installScript = ` -$winver = [System.Environment]::OSVersion.Version | %% {"{0}.{1}" -f $_.Major,$_.Minor} - -switch ($winver) -{ - "6.0" {$machine_os = "2008"} - "6.1" {$machine_os = "2008r2"} - "6.2" {$machine_os = "2012"} - "6.3" {$machine_os = "2012"} - default {$machine_os = "2008r2"} -} - -if ([System.IntPtr]::Size -eq 4) 
{$machine_arch = "i686"} else {$machine_arch = "x86_64"} - -$url = "http://omnitruck.chef.io/%s/chef/download?p=windows&pv=$machine_os&m=$machine_arch&v=%s" -$dest = [System.IO.Path]::GetTempFileName() -$dest = [System.IO.Path]::ChangeExtension($dest, ".msi") -$downloader = New-Object System.Net.WebClient - -$http_proxy = '%s' -if ($http_proxy -ne '') { - $no_proxy = '%s' - if ($no_proxy -eq ''){ - $no_proxy = "127.0.0.1" - } - - $proxy = New-Object System.Net.WebProxy($http_proxy, $true, ,$no_proxy.Split(',')) - $downloader.proxy = $proxy -} - -Write-Host 'Downloading Chef Client...' -$downloader.DownloadFile($url, $dest) - -Write-Host 'Installing Chef Client...' -Start-Process -FilePath msiexec -ArgumentList /qn, /i, $dest -Wait -` - -func (p *provisioner) windowsInstallChefClient(o terraform.UIOutput, comm communicator.Communicator) error { - script := path.Join(path.Dir(comm.ScriptPath()), "ChefClient.ps1") - content := fmt.Sprintf(installScript, p.Channel, p.Version, p.HTTPProxy, strings.Join(p.NOProxy, ",")) - - // Copy the script to the new instance - if err := comm.UploadScript(script, strings.NewReader(content)); err != nil { - return fmt.Errorf("Uploading client.rb failed: %v", err) - } - - // Execute the script to install Chef Client - installCmd := fmt.Sprintf("powershell -NoProfile -ExecutionPolicy Bypass -File %s", script) - return p.runCommand(o, comm, installCmd) -} - -func (p *provisioner) windowsCreateConfigFiles(o terraform.UIOutput, comm communicator.Communicator) error { - // Make sure the config directory exists - cmd := fmt.Sprintf("cmd /c if not exist %q mkdir %q", windowsConfDir, windowsConfDir) - if err := p.runCommand(o, comm, cmd); err != nil { - return err - } - - if len(p.OhaiHints) > 0 { - // Make sure the hits directory exists - hintsDir := path.Join(windowsConfDir, "ohai/hints") - cmd := fmt.Sprintf("cmd /c if not exist %q mkdir %q", hintsDir, hintsDir) - if err := p.runCommand(o, comm, cmd); err != nil { - return err - } - - if err 
:= p.deployOhaiHints(o, comm, hintsDir); err != nil { - return err - } - } - - return p.deployConfigFiles(o, comm, windowsConfDir) -} diff --git a/builtin/provisioners/chef/windows_provisioner_test.go b/builtin/provisioners/chef/windows_provisioner_test.go deleted file mode 100644 index 603d92409..000000000 --- a/builtin/provisioners/chef/windows_provisioner_test.go +++ /dev/null @@ -1,394 +0,0 @@ -package chef - -import ( - "fmt" - "path" - "testing" - - "github.com/hashicorp/terraform/communicator" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceProvider_windowsInstallChefClient(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - Commands map[string]bool - UploadScripts map[string]string - }{ - "Default": { - Config: map[string]interface{}{ - "node_name": "nodename1", - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - Commands: map[string]bool{ - "powershell -NoProfile -ExecutionPolicy Bypass -File ChefClient.ps1": true, - }, - - UploadScripts: map[string]string{ - "ChefClient.ps1": defaultWindowsInstallScript, - }, - }, - - "Proxy": { - Config: map[string]interface{}{ - "http_proxy": "http://proxy.local", - "no_proxy": []interface{}{"http://local.local", "http://local.org"}, - "node_name": "nodename1", - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - Commands: map[string]bool{ - "powershell -NoProfile -ExecutionPolicy Bypass -File ChefClient.ps1": true, - }, - - UploadScripts: map[string]string{ - "ChefClient.ps1": proxyWindowsInstallScript, - }, - }, - - "Version": { - Config: map[string]interface{}{ - "node_name": "nodename1", - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - "version": 
"11.18.6", - }, - - Commands: map[string]bool{ - "powershell -NoProfile -ExecutionPolicy Bypass -File ChefClient.ps1": true, - }, - - UploadScripts: map[string]string{ - "ChefClient.ps1": versionWindowsInstallScript, - }, - }, - - "Channel": { - Config: map[string]interface{}{ - "channel": "current", - "node_name": "nodename1", - "run_list": []interface{}{"cookbook::recipe"}, - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - "version": "11.18.6", - }, - - Commands: map[string]bool{ - "powershell -NoProfile -ExecutionPolicy Bypass -File ChefClient.ps1": true, - }, - - UploadScripts: map[string]string{ - "ChefClient.ps1": channelWindowsInstallScript, - }, - }, - } - - o := new(terraform.MockUIOutput) - c := new(communicator.MockCommunicator) - - for k, tc := range cases { - c.Commands = tc.Commands - c.UploadScripts = tc.UploadScripts - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - p.useSudo = false - - err = p.windowsInstallChefClient(o, c) - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } -} - -func TestResourceProvider_windowsCreateConfigFiles(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - Commands map[string]bool - Uploads map[string]string - }{ - "Default": { - Config: map[string]interface{}{ - "ohai_hints": []interface{}{"testdata/ohaihint.json"}, - "node_name": "nodename1", - "run_list": []interface{}{"cookbook::recipe"}, - "secret_key": "SECRET-KEY", - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - Commands: map[string]bool{ - fmt.Sprintf("cmd /c if not exist %q mkdir %q", windowsConfDir, windowsConfDir): true, - fmt.Sprintf("cmd /c if not exist %q mkdir %q", - path.Join(windowsConfDir, "ohai/hints"), - path.Join(windowsConfDir, "ohai/hints")): true, - }, - - Uploads: map[string]string{ - 
windowsConfDir + "/client.rb": defaultWindowsClientConf, - windowsConfDir + "/encrypted_data_bag_secret": "SECRET-KEY", - windowsConfDir + "/first-boot.json": `{"run_list":["cookbook::recipe"]}`, - windowsConfDir + "/ohai/hints/ohaihint.json": "OHAI-HINT-FILE", - windowsConfDir + "/bob.pem": "USER-KEY", - }, - }, - - "Proxy": { - Config: map[string]interface{}{ - "http_proxy": "http://proxy.local", - "https_proxy": "https://proxy.local", - "no_proxy": []interface{}{"http://local.local", "https://local.local"}, - "node_name": "nodename1", - "run_list": []interface{}{"cookbook::recipe"}, - "secret_key": "SECRET-KEY", - "server_url": "https://chef.local", - "ssl_verify_mode": "verify_none", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - Commands: map[string]bool{ - fmt.Sprintf("cmd /c if not exist %q mkdir %q", windowsConfDir, windowsConfDir): true, - }, - - Uploads: map[string]string{ - windowsConfDir + "/client.rb": proxyWindowsClientConf, - windowsConfDir + "/first-boot.json": `{"run_list":["cookbook::recipe"]}`, - windowsConfDir + "/encrypted_data_bag_secret": "SECRET-KEY", - windowsConfDir + "/bob.pem": "USER-KEY", - }, - }, - - "Attributes JSON": { - Config: map[string]interface{}{ - "attributes_json": `{"key1":{"subkey1":{"subkey2a":["val1","val2","val3"],` + - `"subkey2b":{"subkey3":"value3"}}},"key2":"value2"}`, - "node_name": "nodename1", - "run_list": []interface{}{"cookbook::recipe"}, - "secret_key": "SECRET-KEY", - "server_url": "https://chef.local", - "user_name": "bob", - "user_key": "USER-KEY", - }, - - Commands: map[string]bool{ - fmt.Sprintf("cmd /c if not exist %q mkdir %q", windowsConfDir, windowsConfDir): true, - }, - - Uploads: map[string]string{ - windowsConfDir + "/client.rb": defaultWindowsClientConf, - windowsConfDir + "/encrypted_data_bag_secret": "SECRET-KEY", - windowsConfDir + "/bob.pem": "USER-KEY", - windowsConfDir + "/first-boot.json": `{"key1":{"subkey1":{"subkey2a":["val1","val2","val3"],` + - 
`"subkey2b":{"subkey3":"value3"}}},"key2":"value2","run_list":["cookbook::recipe"]}`, - }, - }, - } - - o := new(terraform.MockUIOutput) - c := new(communicator.MockCommunicator) - - for k, tc := range cases { - c.Commands = tc.Commands - c.Uploads = tc.Uploads - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - p.useSudo = false - - err = p.windowsCreateConfigFiles(o, c) - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } -} - -const defaultWindowsInstallScript = ` -$winver = [System.Environment]::OSVersion.Version | % {"{0}.{1}" -f $_.Major,$_.Minor} - -switch ($winver) -{ - "6.0" {$machine_os = "2008"} - "6.1" {$machine_os = "2008r2"} - "6.2" {$machine_os = "2012"} - "6.3" {$machine_os = "2012"} - default {$machine_os = "2008r2"} -} - -if ([System.IntPtr]::Size -eq 4) {$machine_arch = "i686"} else {$machine_arch = "x86_64"} - -$url = "http://omnitruck.chef.io/stable/chef/download?p=windows&pv=$machine_os&m=$machine_arch&v=" -$dest = [System.IO.Path]::GetTempFileName() -$dest = [System.IO.Path]::ChangeExtension($dest, ".msi") -$downloader = New-Object System.Net.WebClient - -$http_proxy = '' -if ($http_proxy -ne '') { - $no_proxy = '' - if ($no_proxy -eq ''){ - $no_proxy = "127.0.0.1" - } - - $proxy = New-Object System.Net.WebProxy($http_proxy, $true, ,$no_proxy.Split(',')) - $downloader.proxy = $proxy -} - -Write-Host 'Downloading Chef Client...' -$downloader.DownloadFile($url, $dest) - -Write-Host 'Installing Chef Client...' 
-Start-Process -FilePath msiexec -ArgumentList /qn, /i, $dest -Wait -` - -const proxyWindowsInstallScript = ` -$winver = [System.Environment]::OSVersion.Version | % {"{0}.{1}" -f $_.Major,$_.Minor} - -switch ($winver) -{ - "6.0" {$machine_os = "2008"} - "6.1" {$machine_os = "2008r2"} - "6.2" {$machine_os = "2012"} - "6.3" {$machine_os = "2012"} - default {$machine_os = "2008r2"} -} - -if ([System.IntPtr]::Size -eq 4) {$machine_arch = "i686"} else {$machine_arch = "x86_64"} - -$url = "http://omnitruck.chef.io/stable/chef/download?p=windows&pv=$machine_os&m=$machine_arch&v=" -$dest = [System.IO.Path]::GetTempFileName() -$dest = [System.IO.Path]::ChangeExtension($dest, ".msi") -$downloader = New-Object System.Net.WebClient - -$http_proxy = 'http://proxy.local' -if ($http_proxy -ne '') { - $no_proxy = 'http://local.local,http://local.org' - if ($no_proxy -eq ''){ - $no_proxy = "127.0.0.1" - } - - $proxy = New-Object System.Net.WebProxy($http_proxy, $true, ,$no_proxy.Split(',')) - $downloader.proxy = $proxy -} - -Write-Host 'Downloading Chef Client...' -$downloader.DownloadFile($url, $dest) - -Write-Host 'Installing Chef Client...' 
-Start-Process -FilePath msiexec -ArgumentList /qn, /i, $dest -Wait -` - -const versionWindowsInstallScript = ` -$winver = [System.Environment]::OSVersion.Version | % {"{0}.{1}" -f $_.Major,$_.Minor} - -switch ($winver) -{ - "6.0" {$machine_os = "2008"} - "6.1" {$machine_os = "2008r2"} - "6.2" {$machine_os = "2012"} - "6.3" {$machine_os = "2012"} - default {$machine_os = "2008r2"} -} - -if ([System.IntPtr]::Size -eq 4) {$machine_arch = "i686"} else {$machine_arch = "x86_64"} - -$url = "http://omnitruck.chef.io/stable/chef/download?p=windows&pv=$machine_os&m=$machine_arch&v=11.18.6" -$dest = [System.IO.Path]::GetTempFileName() -$dest = [System.IO.Path]::ChangeExtension($dest, ".msi") -$downloader = New-Object System.Net.WebClient - -$http_proxy = '' -if ($http_proxy -ne '') { - $no_proxy = '' - if ($no_proxy -eq ''){ - $no_proxy = "127.0.0.1" - } - - $proxy = New-Object System.Net.WebProxy($http_proxy, $true, ,$no_proxy.Split(',')) - $downloader.proxy = $proxy -} - -Write-Host 'Downloading Chef Client...' -$downloader.DownloadFile($url, $dest) - -Write-Host 'Installing Chef Client...' 
-Start-Process -FilePath msiexec -ArgumentList /qn, /i, $dest -Wait -` -const channelWindowsInstallScript = ` -$winver = [System.Environment]::OSVersion.Version | % {"{0}.{1}" -f $_.Major,$_.Minor} - -switch ($winver) -{ - "6.0" {$machine_os = "2008"} - "6.1" {$machine_os = "2008r2"} - "6.2" {$machine_os = "2012"} - "6.3" {$machine_os = "2012"} - default {$machine_os = "2008r2"} -} - -if ([System.IntPtr]::Size -eq 4) {$machine_arch = "i686"} else {$machine_arch = "x86_64"} - -$url = "http://omnitruck.chef.io/current/chef/download?p=windows&pv=$machine_os&m=$machine_arch&v=11.18.6" -$dest = [System.IO.Path]::GetTempFileName() -$dest = [System.IO.Path]::ChangeExtension($dest, ".msi") -$downloader = New-Object System.Net.WebClient - -$http_proxy = '' -if ($http_proxy -ne '') { - $no_proxy = '' - if ($no_proxy -eq ''){ - $no_proxy = "127.0.0.1" - } - - $proxy = New-Object System.Net.WebProxy($http_proxy, $true, ,$no_proxy.Split(',')) - $downloader.proxy = $proxy -} - -Write-Host 'Downloading Chef Client...' -$downloader.DownloadFile($url, $dest) - -Write-Host 'Installing Chef Client...' 
-Start-Process -FilePath msiexec -ArgumentList /qn, /i, $dest -Wait -` - -const defaultWindowsClientConf = `log_location STDOUT -chef_server_url "https://chef.local/" -node_name "nodename1"` - -const proxyWindowsClientConf = `log_location STDOUT -chef_server_url "https://chef.local/" -node_name "nodename1" - -http_proxy "http://proxy.local" -ENV['http_proxy'] = "http://proxy.local" -ENV['HTTP_PROXY'] = "http://proxy.local" - -https_proxy "https://proxy.local" -ENV['https_proxy'] = "https://proxy.local" -ENV['HTTPS_PROXY'] = "https://proxy.local" - -no_proxy "http://local.local,https://local.local" -ENV['no_proxy'] = "http://local.local,https://local.local" - -ssl_verify_mode :verify_none` diff --git a/builtin/provisioners/file/resource_provisioner.go b/builtin/provisioners/file/resource_provisioner.go index 26f2f4daf..b96c03121 100644 --- a/builtin/provisioners/file/resource_provisioner.go +++ b/builtin/provisioners/file/resource_provisioner.go @@ -2,96 +2,129 @@ package file import ( "context" + "errors" "fmt" "io/ioutil" "os" "github.com/hashicorp/terraform/communicator" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/provisioners" "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" ) -func Provisioner() terraform.ResourceProvisioner { - return &schema.Provisioner{ - Schema: map[string]*schema.Schema{ - "source": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"content"}, - }, - - "content": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"source"}, - }, - - "destination": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - - ApplyFunc: applyFn, - ValidateFunc: validateFn, +func New() provisioners.Interface { + ctx, cancel := context.WithCancel(context.Background()) + return &provisioner{ + ctx: ctx, + cancel: 
cancel, } } -func applyFn(ctx context.Context) error { - connState := ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState) - data := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) +type provisioner struct { + // We store a context here tied to the lifetime of the provisioner. + // This allows the Stop method to cancel any in-flight requests. + ctx context.Context + cancel context.CancelFunc +} - // Get a new communicator - comm, err := communicator.New(connState) +func (p *provisioner) GetSchema() (resp provisioners.GetSchemaResponse) { + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "source": { + Type: cty.String, + Optional: true, + }, + + "content": { + Type: cty.String, + Optional: true, + }, + + "destination": { + Type: cty.String, + Required: true, + }, + }, + } + resp.Provisioner = schema + return resp +} + +func (p *provisioner) ValidateProvisionerConfig(req provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { + cfg, err := p.GetSchema().Provisioner.CoerceValue(req.Config) if err != nil { - return err + resp.Diagnostics = resp.Diagnostics.Append(err) + } + + source := cfg.GetAttr("source") + content := cfg.GetAttr("content") + + switch { + case !source.IsNull() && !content.IsNull(): + resp.Diagnostics = resp.Diagnostics.Append(errors.New("Cannot set both 'source' and 'content'")) + return resp + case source.IsNull() && content.IsNull(): + resp.Diagnostics = resp.Diagnostics.Append(errors.New("Must provide one of 'source' or 'content'")) + return resp + } + + return resp +} + +func (p *provisioner) ProvisionResource(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + comm, err := communicator.New(req.Connection) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp } // Get the source - src, deleteSource, err := getSrc(data) + src, deleteSource, err := getSrc(req.Config) if err != nil 
{ - return err + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp } if deleteSource { defer os.Remove(src) } // Begin the file copy - dst := data.Get("destination").(string) - - if err := copyFiles(ctx, comm, src, dst); err != nil { - return err - } - return nil -} - -func validateFn(c *terraform.ResourceConfig) (ws []string, es []error) { - if !c.IsSet("source") && !c.IsSet("content") { - es = append(es, fmt.Errorf("Must provide one of 'source' or 'content'")) + dst := req.Config.GetAttr("destination").AsString() + if err := copyFiles(p.ctx, comm, src, dst); err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp } - return ws, es + return resp } // getSrc returns the file to use as source -func getSrc(data *schema.ResourceData) (string, bool, error) { - src := data.Get("source").(string) - if content, ok := data.GetOk("content"); ok { +func getSrc(v cty.Value) (string, bool, error) { + content := v.GetAttr("content") + src := v.GetAttr("source") + + switch { + case !content.IsNull(): file, err := ioutil.TempFile("", "tf-file-content") if err != nil { return "", true, err } - if _, err = file.WriteString(content.(string)); err != nil { + if _, err = file.WriteString(content.AsString()); err != nil { return "", true, err } return file.Name(), true, nil - } - expansion, err := homedir.Expand(src) - return expansion, false, err + case !src.IsNull(): + expansion, err := homedir.Expand(src.AsString()) + return expansion, false, err + + default: + panic("source and content cannot both be null") + } } // copyFiles is used to copy the files from a source to a destination @@ -138,5 +171,15 @@ func copyFiles(ctx context.Context, comm communicator.Communicator, src, dst str if err != nil { return fmt.Errorf("Upload failed: %v", err) } + return err } + +func (p *provisioner) Stop() error { + p.cancel() + return nil +} + +func (p *provisioner) Close() error { + return nil +} diff --git a/builtin/provisioners/file/resource_provisioner_test.go 
b/builtin/provisioners/file/resource_provisioner_test.go index c7e34c0ab..080c76a74 100644 --- a/builtin/provisioners/file/resource_provisioner_test.go +++ b/builtin/provisioners/file/resource_provisioner_test.go @@ -3,110 +3,102 @@ package file import ( "testing" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/provisioners" + "github.com/zclconf/go-cty/cty" ) -func TestResourceProvisioner_impl(t *testing.T) { - var _ terraform.ResourceProvisioner = Provisioner() -} - -func TestProvisioner(t *testing.T) { - if err := Provisioner().(*schema.Provisioner).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - func TestResourceProvider_Validate_good_source(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "source": "/tmp/foo", - "destination": "/tmp/bar", + v := cty.ObjectVal(map[string]cty.Value{ + "source": cty.StringVal("/tmp/foo"), + "destination": cty.StringVal("/tmp/bar"), }) - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) > 0 { - t.Fatalf("Errors: %v", errs) + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if len(resp.Diagnostics) > 0 { + t.Fatal(resp.Diagnostics.ErrWithWarnings()) } } func TestResourceProvider_Validate_good_content(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "content": "value to copy", - "destination": "/tmp/bar", + v := cty.ObjectVal(map[string]cty.Value{ + "content": cty.StringVal("value to copy"), + "destination": cty.StringVal("/tmp/bar"), }) - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) > 0 { - t.Fatalf("Errors: %v", errs) + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if len(resp.Diagnostics) > 0 { + 
t.Fatal(resp.Diagnostics.ErrWithWarnings()) } } func TestResourceProvider_Validate_good_unknown_variable_value(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "content": hcl2shim.UnknownVariableValue, - "destination": "/tmp/bar", + v := cty.ObjectVal(map[string]cty.Value{ + "content": cty.UnknownVal(cty.String), + "destination": cty.StringVal("/tmp/bar"), }) - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) > 0 { - t.Fatalf("Errors: %v", errs) + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if len(resp.Diagnostics) > 0 { + t.Fatal(resp.Diagnostics.ErrWithWarnings()) } } func TestResourceProvider_Validate_bad_not_destination(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "source": "nope", + v := cty.ObjectVal(map[string]cty.Value{ + "source": cty.StringVal("nope"), }) - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) == 0 { - t.Fatalf("Should have errors") + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if !resp.Diagnostics.HasErrors() { + t.Fatal("Should have errors") } } func TestResourceProvider_Validate_bad_no_source(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "destination": "/tmp/bar", + v := cty.ObjectVal(map[string]cty.Value{ + "destination": cty.StringVal("/tmp/bar"), }) - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) == 0 { - t.Fatalf("Should have errors") + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if !resp.Diagnostics.HasErrors() { + t.Fatal("Should have errors") } } func TestResourceProvider_Validate_bad_to_many_src(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "source": "nope", - "content": "value to 
copy", - "destination": "/tmp/bar", + v := cty.ObjectVal(map[string]cty.Value{ + "source": cty.StringVal("nope"), + "content": cty.StringVal("vlue to copy"), + "destination": cty.StringVal("/tmp/bar"), }) - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) == 0 { - t.Fatalf("Should have errors") + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: v, + }) + + if !resp.Diagnostics.HasErrors() { + t.Fatal("Should have errors") } } -func testConfig(t *testing.T, c map[string]interface{}) *terraform.ResourceConfig { - return terraform.NewResourceConfigRaw(c) +// Validate that Stop can Close can be called even when not provisioning. +func TestResourceProvisioner_StopClose(t *testing.T) { + p := New() + p.Stop() + p.Close() } diff --git a/builtin/provisioners/habitat/linux_provisioner.go b/builtin/provisioners/habitat/linux_provisioner.go deleted file mode 100644 index 414be6927..000000000 --- a/builtin/provisioners/habitat/linux_provisioner.go +++ /dev/null @@ -1,377 +0,0 @@ -package habitat - -import ( - "bytes" - "errors" - "fmt" - "github.com/hashicorp/terraform/communicator" - "github.com/hashicorp/terraform/terraform" - "path" - "path/filepath" - "strings" - "text/template" -) - -const installURL = "https://raw.githubusercontent.com/habitat-sh/habitat/master/components/hab/install.sh" -const systemdUnit = `[Unit] -Description=Habitat Supervisor - -[Service] -ExecStart=/bin/hab sup run{{ .SupOptions }} -Restart=on-failure -{{ if .GatewayAuthToken -}} -Environment="HAB_SUP_GATEWAY_AUTH_TOKEN={{ .GatewayAuthToken }}" -{{ end -}} -{{ if .BuilderAuthToken -}} -Environment="HAB_AUTH_TOKEN={{ .BuilderAuthToken }}" -{{ end -}} - -[Install] -WantedBy=default.target -` - -func (p *provisioner) linuxInstallHabitat(o terraform.UIOutput, comm communicator.Communicator) error { - // Download the hab installer - if err := p.runCommand(o, comm, 
p.linuxGetCommand(fmt.Sprintf("curl --silent -L0 %s > install.sh", installURL))); err != nil { - return err - } - - // Run the install script - var command string - if p.Version == "" { - command = fmt.Sprintf("bash ./install.sh ") - } else { - command = fmt.Sprintf("bash ./install.sh -v %s", p.Version) - } - - if err := p.runCommand(o, comm, p.linuxGetCommand(command)); err != nil { - return err - } - - // Accept the license - if p.AcceptLicense { - var cmd string - - if p.UseSudo == true { - cmd = "env HAB_LICENSE=accept sudo -E /bin/bash -c 'hab -V'" - } else { - cmd = "env HAB_LICENSE=accept /bin/bash -c 'hab -V'" - } - - if err := p.runCommand(o, comm, cmd); err != nil { - return err - } - } - - // Create the hab user - if err := p.createHabUser(o, comm); err != nil { - return err - } - - // Cleanup the installer - return p.runCommand(o, comm, p.linuxGetCommand(fmt.Sprintf("rm -f install.sh"))) -} - -func (p *provisioner) createHabUser(o terraform.UIOutput, comm communicator.Communicator) error { - var addUser bool - - // Install busybox to get us the user tools we need - if err := p.runCommand(o, comm, p.linuxGetCommand(fmt.Sprintf("hab install core/busybox"))); err != nil { - return err - } - - // Check for existing hab user - if err := p.runCommand(o, comm, p.linuxGetCommand(fmt.Sprintf("hab pkg exec core/busybox id hab"))); err != nil { - o.Output("No existing hab user detected, creating...") - addUser = true - } - - if addUser { - return p.runCommand(o, comm, p.linuxGetCommand(fmt.Sprintf("hab pkg exec core/busybox adduser -D -g \"\" hab"))) - } - - return nil -} - -func (p *provisioner) linuxStartHabitat(o terraform.UIOutput, comm communicator.Communicator) error { - // Install the supervisor first - var command string - if p.Version == "" { - command += p.linuxGetCommand(fmt.Sprintf("hab install core/hab-sup")) - } else { - command += p.linuxGetCommand(fmt.Sprintf("hab install core/hab-sup/%s", p.Version)) - } - - if err := p.runCommand(o, comm, 
command); err != nil { - return err - } - - // Build up supervisor options - options := "" - if p.PermanentPeer { - options += " --permanent-peer" - } - - if p.ListenCtl != "" { - options += fmt.Sprintf(" --listen-ctl %s", p.ListenCtl) - } - - if p.ListenGossip != "" { - options += fmt.Sprintf(" --listen-gossip %s", p.ListenGossip) - } - - if p.ListenHTTP != "" { - options += fmt.Sprintf(" --listen-http %s", p.ListenHTTP) - } - - if p.Peer != "" { - options += fmt.Sprintf(" %s", p.Peer) - } - - if len(p.Peers) > 0 { - if len(p.Peers) == 1 { - options += fmt.Sprintf(" --peer %s", p.Peers[0]) - } else { - options += fmt.Sprintf(" --peer %s", strings.Join(p.Peers, " --peer ")) - } - } - - if p.RingKey != "" { - options += fmt.Sprintf(" --ring %s", p.RingKey) - } - - if p.URL != "" { - options += fmt.Sprintf(" --url %s", p.URL) - } - - if p.Channel != "" { - options += fmt.Sprintf(" --channel %s", p.Channel) - } - - if p.Events != "" { - options += fmt.Sprintf(" --events %s", p.Events) - } - - if p.Organization != "" { - options += fmt.Sprintf(" --org %s", p.Organization) - } - - if p.HttpDisable == true { - options += fmt.Sprintf(" --http-disable") - } - - if p.AutoUpdate == true { - options += fmt.Sprintf(" --auto-update") - } - - p.SupOptions = options - - // Start hab depending on service type - switch p.ServiceType { - case "unmanaged": - return p.linuxStartHabitatUnmanaged(o, comm, options) - case "systemd": - return p.linuxStartHabitatSystemd(o, comm, options) - default: - return errors.New("unsupported service type") - } -} - -// This func is a little different than the others since we need to expose HAB_AUTH_TOKEN to a shell -// sub-process that's actually running the supervisor. 
-func (p *provisioner) linuxStartHabitatUnmanaged(o terraform.UIOutput, comm communicator.Communicator, options string) error { - var token string - - // Create the sup directory for the log file - if err := p.runCommand(o, comm, p.linuxGetCommand("mkdir -p /hab/sup/default && chmod o+w /hab/sup/default")); err != nil { - return err - } - - // Set HAB_AUTH_TOKEN if provided - if p.BuilderAuthToken != "" { - token = fmt.Sprintf("env HAB_AUTH_TOKEN=%s ", p.BuilderAuthToken) - } - - return p.runCommand(o, comm, p.linuxGetCommand(fmt.Sprintf("(%ssetsid hab sup run%s > /hab/sup/default/sup.log 2>&1 <&1 &) ; sleep 1", token, options))) -} - -func (p *provisioner) linuxStartHabitatSystemd(o terraform.UIOutput, comm communicator.Communicator, options string) error { - // Create a new template and parse the client config into it - unitString := template.Must(template.New("hab-supervisor.service").Parse(systemdUnit)) - - var buf bytes.Buffer - err := unitString.Execute(&buf, p) - if err != nil { - return fmt.Errorf("error executing %s.service template: %s", p.ServiceName, err) - } - - if err := p.linuxUploadSystemdUnit(o, comm, &buf); err != nil { - return err - } - - return p.runCommand(o, comm, p.linuxGetCommand(fmt.Sprintf("systemctl enable %s && systemctl start %s", p.ServiceName, p.ServiceName))) -} - -func (p *provisioner) linuxUploadSystemdUnit(o terraform.UIOutput, comm communicator.Communicator, contents *bytes.Buffer) error { - destination := fmt.Sprintf("/etc/systemd/system/%s.service", p.ServiceName) - - if p.UseSudo { - tempPath := fmt.Sprintf("/tmp/%s.service", p.ServiceName) - if err := comm.Upload(tempPath, contents); err != nil { - return err - } - - return p.runCommand(o, comm, p.linuxGetCommand(fmt.Sprintf("mv %s %s", tempPath, destination))) - } - - return comm.Upload(destination, contents) -} - -func (p *provisioner) linuxUploadRingKey(o terraform.UIOutput, comm communicator.Communicator) error { - return p.runCommand(o, comm, 
p.linuxGetCommand(fmt.Sprintf(`echo -e "%s" | hab ring key import`, p.RingKeyContent))) -} - -func (p *provisioner) linuxUploadCtlSecret(o terraform.UIOutput, comm communicator.Communicator) error { - destination := fmt.Sprintf("/hab/sup/default/CTL_SECRET") - // Create the destination directory - err := p.runCommand(o, comm, p.linuxGetCommand(fmt.Sprintf("mkdir -p %s", filepath.Dir(destination)))) - if err != nil { - return err - } - - keyContent := strings.NewReader(p.CtlSecret) - if p.UseSudo { - tempPath := fmt.Sprintf("/tmp/CTL_SECRET") - if err := comm.Upload(tempPath, keyContent); err != nil { - return err - } - - return p.runCommand(o, comm, p.linuxGetCommand(fmt.Sprintf("chown root:root %s && chmod 0600 %s && mv %s %s", tempPath, tempPath, tempPath, destination))) - } - - return comm.Upload(destination, keyContent) -} - -// -// Habitat Services -// -func (p *provisioner) linuxStartHabitatService(o terraform.UIOutput, comm communicator.Communicator, service Service) error { - var options string - - if err := p.linuxInstallHabitatPackage(o, comm, service); err != nil { - return err - } - if err := p.uploadUserTOML(o, comm, service); err != nil { - return err - } - - // Upload service group key - if service.ServiceGroupKey != "" { - err := p.uploadServiceGroupKey(o, comm, service.ServiceGroupKey) - if err != nil { - return err - } - } - - if service.Topology != "" { - options += fmt.Sprintf(" --topology %s", service.Topology) - } - - if service.Strategy != "" { - options += fmt.Sprintf(" --strategy %s", service.Strategy) - } - - if service.Channel != "" { - options += fmt.Sprintf(" --channel %s", service.Channel) - } - - if service.URL != "" { - options += fmt.Sprintf(" --url %s", service.URL) - } - - if service.Group != "" { - options += fmt.Sprintf(" --group %s", service.Group) - } - - for _, bind := range service.Binds { - options += fmt.Sprintf(" --bind %s", bind.toBindString()) - } - - return p.runCommand(o, comm, p.linuxGetCommand(fmt.Sprintf("hab svc 
load %s %s", service.Name, options))) -} - -// In the future we'll remove the dedicated install once the synchronous load feature in hab-sup is -// available. Until then we install here to provide output and a noisy failure mechanism because -// if you install with the pkg load, it occurs asynchronously and fails quietly. -func (p *provisioner) linuxInstallHabitatPackage(o terraform.UIOutput, comm communicator.Communicator, service Service) error { - var options string - - if service.Channel != "" { - options += fmt.Sprintf(" --channel %s", service.Channel) - } - - if service.URL != "" { - options += fmt.Sprintf(" --url %s", service.URL) - } - - return p.runCommand(o, comm, p.linuxGetCommand(fmt.Sprintf("hab pkg install %s %s", service.Name, options))) -} - -func (p *provisioner) uploadServiceGroupKey(o terraform.UIOutput, comm communicator.Communicator, key string) error { - keyName := strings.Split(key, "\n")[1] - o.Output("Uploading service group key: " + keyName) - keyFileName := fmt.Sprintf("%s.box.key", keyName) - destPath := path.Join("/hab/cache/keys", keyFileName) - keyContent := strings.NewReader(key) - if p.UseSudo { - tempPath := path.Join("/tmp", keyFileName) - if err := comm.Upload(tempPath, keyContent); err != nil { - return err - } - - return p.runCommand(o, comm, p.linuxGetCommand(fmt.Sprintf("mv %s %s", tempPath, destPath))) - } - - return comm.Upload(destPath, keyContent) -} - -func (p *provisioner) uploadUserTOML(o terraform.UIOutput, comm communicator.Communicator, service Service) error { - // Create the hab svc directory to lay down the user.toml before loading the service - o.Output("Uploading user.toml for service: " + service.Name) - destDir := fmt.Sprintf("/hab/user/%s/config", service.getPackageName(service.Name)) - command := p.linuxGetCommand(fmt.Sprintf("mkdir -p %s", destDir)) - if err := p.runCommand(o, comm, command); err != nil { - return err - } - - userToml := strings.NewReader(service.UserTOML) - - if p.UseSudo { - checksum := 
service.getServiceNameChecksum() - if err := comm.Upload(fmt.Sprintf("/tmp/user-%s.toml", checksum), userToml); err != nil { - return err - } - command = p.linuxGetCommand(fmt.Sprintf("chmod o-r /tmp/user-%s.toml && mv /tmp/user-%s.toml %s/user.toml", checksum, checksum, destDir)) - return p.runCommand(o, comm, command) - } - - return comm.Upload(path.Join(destDir, "user.toml"), userToml) -} - -func (p *provisioner) linuxGetCommand(command string) string { - // Always set HAB_NONINTERACTIVE & HAB_NOCOLORING - env := fmt.Sprintf("env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true") - - // Set builder auth token - if p.BuilderAuthToken != "" { - env += fmt.Sprintf(" HAB_AUTH_TOKEN=%s", p.BuilderAuthToken) - } - - if p.UseSudo { - command = fmt.Sprintf("%s sudo -E /bin/bash -c '%s'", env, command) - } else { - command = fmt.Sprintf("%s /bin/bash -c '%s'", env, command) - } - - return command -} diff --git a/builtin/provisioners/habitat/linux_provisioner_test.go b/builtin/provisioners/habitat/linux_provisioner_test.go deleted file mode 100644 index 2706fa2de..000000000 --- a/builtin/provisioners/habitat/linux_provisioner_test.go +++ /dev/null @@ -1,348 +0,0 @@ -package habitat - -import ( - "github.com/hashicorp/terraform/communicator" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - "testing" -) - -const linuxDefaultSystemdUnitFileContents = `[Unit] -Description=Habitat Supervisor - -[Service] -ExecStart=/bin/hab sup run --peer host1 --peer 1.2.3.4 --auto-update -Restart=on-failure -[Install] -WantedBy=default.target` - -const linuxCustomSystemdUnitFileContents = `[Unit] -Description=Habitat Supervisor - -[Service] -ExecStart=/bin/hab sup run --listen-ctl 192.168.0.1:8443 --listen-gossip 192.168.10.1:9443 --listen-http 192.168.20.1:8080 --peer host1 --peer host2 --peer 1.2.3.4 --peer 5.6.7.8 --peer foo.example.com -Restart=on-failure -Environment="HAB_SUP_GATEWAY_AUTH_TOKEN=ea7-beef" -Environment="HAB_AUTH_TOKEN=dead-beef" 
-[Install] -WantedBy=default.target` - -func TestLinuxProvisioner_linuxInstallHabitat(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - Commands map[string]bool - }{ - "Installation with sudo": { - Config: map[string]interface{}{ - "version": "0.79.1", - "auto_update": true, - "use_sudo": true, - }, - - Commands: map[string]bool{ - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'curl --silent -L0 https://raw.githubusercontent.com/habitat-sh/habitat/master/components/hab/install.sh > install.sh'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'bash ./install.sh -v 0.79.1'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'hab install core/busybox'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'hab pkg exec core/busybox adduser -D -g \"\" hab'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'rm -f install.sh'": true, - }, - }, - "Installation without sudo": { - Config: map[string]interface{}{ - "version": "0.79.1", - "auto_update": true, - "use_sudo": false, - }, - - Commands: map[string]bool{ - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true /bin/bash -c 'curl --silent -L0 https://raw.githubusercontent.com/habitat-sh/habitat/master/components/hab/install.sh > install.sh'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true /bin/bash -c 'bash ./install.sh -v 0.79.1'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true /bin/bash -c 'hab install core/busybox'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true /bin/bash -c 'hab pkg exec core/busybox adduser -D -g \"\" hab'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true /bin/bash -c 'rm -f install.sh'": true, - }, - }, - "Installation with Habitat license acceptance": { - Config: map[string]interface{}{ - "version": "0.81.0", - "accept_license": true, - "auto_update": true, - "use_sudo": true, - }, - - 
Commands: map[string]bool{ - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'curl --silent -L0 https://raw.githubusercontent.com/habitat-sh/habitat/master/components/hab/install.sh > install.sh'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'bash ./install.sh -v 0.81.0'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'hab install core/busybox'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'hab pkg exec core/busybox adduser -D -g \"\" hab'": true, - "env HAB_LICENSE=accept sudo -E /bin/bash -c 'hab -V'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'rm -f install.sh'": true, - }, - }, - } - - o := new(terraform.MockUIOutput) - c := new(communicator.MockCommunicator) - - for k, tc := range cases { - c.Commands = tc.Commands - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - err = p.linuxInstallHabitat(o, c) - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } -} - -func TestLinuxProvisioner_linuxStartHabitat(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - Commands map[string]bool - Uploads map[string]string - }{ - "Start systemd Habitat with sudo": { - Config: map[string]interface{}{ - "version": "0.79.1", - "auto_update": true, - "use_sudo": true, - "service_name": "hab-sup", - "peer": "--peer host1", - "peers": []interface{}{"1.2.3.4"}, - }, - - Commands: map[string]bool{ - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'hab install core/hab-sup/0.79.1'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'systemctl enable hab-sup && systemctl start hab-sup'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'mv /tmp/hab-sup.service 
/etc/systemd/system/hab-sup.service'": true, - }, - - Uploads: map[string]string{ - "/tmp/hab-sup.service": linuxDefaultSystemdUnitFileContents, - }, - }, - "Start systemd Habitat without sudo": { - Config: map[string]interface{}{ - "version": "0.79.1", - "auto_update": true, - "use_sudo": false, - "service_name": "hab-sup", - "peer": "--peer host1", - "peers": []interface{}{"1.2.3.4"}, - }, - - Commands: map[string]bool{ - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true /bin/bash -c 'hab install core/hab-sup/0.79.1'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true /bin/bash -c 'systemctl enable hab-sup && systemctl start hab-sup'": true, - }, - - Uploads: map[string]string{ - "/etc/systemd/system/hab-sup.service": linuxDefaultSystemdUnitFileContents, - }, - }, - "Start unmanaged Habitat with sudo": { - Config: map[string]interface{}{ - "version": "0.81.0", - "license": "accept-no-persist", - "auto_update": true, - "use_sudo": true, - "service_type": "unmanaged", - "peer": "--peer host1", - "peers": []interface{}{"1.2.3.4"}, - }, - - Commands: map[string]bool{ - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'hab install core/hab-sup/0.81.0'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'mkdir -p /hab/sup/default && chmod o+w /hab/sup/default'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c '(setsid hab sup run --peer host1 --peer 1.2.3.4 --auto-update > /hab/sup/default/sup.log 2>&1 <&1 &) ; sleep 1'": true, - }, - - Uploads: map[string]string{ - "/etc/systemd/system/hab-sup.service": linuxDefaultSystemdUnitFileContents, - }, - }, - "Start Habitat with custom config": { - Config: map[string]interface{}{ - "version": "0.79.1", - "auto_update": false, - "use_sudo": true, - "service_name": "hab-sup", - "peer": "--peer host1 --peer host2", - "peers": []interface{}{"1.2.3.4", "5.6.7.8", "foo.example.com"}, - "listen_ctl": "192.168.0.1:8443", - "listen_gossip": 
"192.168.10.1:9443", - "listen_http": "192.168.20.1:8080", - "builder_auth_token": "dead-beef", - "gateway_auth_token": "ea7-beef", - "ctl_secret": "bad-beef", - }, - - Commands: map[string]bool{ - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true HAB_AUTH_TOKEN=dead-beef sudo -E /bin/bash -c 'hab install core/hab-sup/0.79.1'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true HAB_AUTH_TOKEN=dead-beef sudo -E /bin/bash -c 'systemctl enable hab-sup && systemctl start hab-sup'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true HAB_AUTH_TOKEN=dead-beef sudo -E /bin/bash -c 'mv /tmp/hab-sup.service /etc/systemd/system/hab-sup.service'": true, - }, - - Uploads: map[string]string{ - "/tmp/hab-sup.service": linuxCustomSystemdUnitFileContents, - }, - }, - } - - o := new(terraform.MockUIOutput) - c := new(communicator.MockCommunicator) - - for k, tc := range cases { - c.Commands = tc.Commands - c.Uploads = tc.Uploads - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - err = p.linuxStartHabitat(o, c) - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } -} - -func TestLinuxProvisioner_linuxUploadRingKey(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - Commands map[string]bool - }{ - "Upload ring key": { - Config: map[string]interface{}{ - "version": "0.79.1", - "auto_update": true, - "use_sudo": true, - "service_name": "hab-sup", - "peers": []interface{}{"1.2.3.4"}, - "ring_key": "test-ring", - "ring_key_content": "dead-beef", - }, - - Commands: map[string]bool{ - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'echo -e \"dead-beef\" | hab ring key import'": true, - }, - }, - } - - o := new(terraform.MockUIOutput) - c := new(communicator.MockCommunicator) - - for k, tc := range cases { - c.Commands = tc.Commands - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, 
Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - err = p.linuxUploadRingKey(o, c) - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } -} - -func TestLinuxProvisioner_linuxStartHabitatService(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - Commands map[string]bool - Uploads map[string]string - }{ - "Start Habitat service with sudo": { - Config: map[string]interface{}{ - "version": "0.79.1", - "auto_update": false, - "use_sudo": true, - "service_name": "hab-sup", - "peers": []interface{}{"1.2.3.4"}, - "ring_key": "test-ring", - "ring_key_content": "dead-beef", - "service": []interface{}{ - map[string]interface{}{ - "name": "core/foo", - "topology": "standalone", - "strategy": "none", - "channel": "stable", - "user_toml": "[config]\nlisten = 0.0.0.0:8080", - "bind": []interface{}{ - map[string]interface{}{ - "alias": "backend", - "service": "bar", - "group": "default", - }, - }, - }, - map[string]interface{}{ - "name": "core/bar", - "topology": "standalone", - "strategy": "rolling", - "channel": "staging", - "user_toml": "[config]\nlisten = 0.0.0.0:443", - }, - }, - }, - - Commands: map[string]bool{ - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'hab pkg install core/foo --channel stable'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'mkdir -p /hab/user/foo/config'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'chmod o-r /tmp/user-a5b83ec1b302d109f41852ae17379f75c36dff9bc598aae76b6f7c9cd425fd76.toml && mv /tmp/user-a5b83ec1b302d109f41852ae17379f75c36dff9bc598aae76b6f7c9cd425fd76.toml /hab/user/foo/config/user.toml'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'hab svc load core/foo --topology standalone --strategy none --channel stable --bind backend:bar.default'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E 
/bin/bash -c 'hab pkg install core/bar --channel staging'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'mkdir -p /hab/user/bar/config'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'chmod o-r /tmp/user-6466ae3283ae1bd4737b00367bc676c6465b25682169ea5f7da222f3f078a5bf.toml && mv /tmp/user-6466ae3283ae1bd4737b00367bc676c6465b25682169ea5f7da222f3f078a5bf.toml /hab/user/bar/config/user.toml'": true, - "env HAB_NONINTERACTIVE=true HAB_NOCOLORING=true sudo -E /bin/bash -c 'hab svc load core/bar --topology standalone --strategy rolling --channel staging'": true, - }, - - Uploads: map[string]string{ - "/tmp/user-a5b83ec1b302d109f41852ae17379f75c36dff9bc598aae76b6f7c9cd425fd76.toml": "[config]\nlisten = 0.0.0.0:8080", - "/tmp/user-6466ae3283ae1bd4737b00367bc676c6465b25682169ea5f7da222f3f078a5bf.toml": "[config]\nlisten = 0.0.0.0:443", - }, - }, - } - - o := new(terraform.MockUIOutput) - c := new(communicator.MockCommunicator) - - for k, tc := range cases { - c.Commands = tc.Commands - c.Uploads = tc.Uploads - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - var errs []error - for _, s := range p.Services { - err = p.linuxStartHabitatService(o, c, s) - if err != nil { - errs = append(errs, err) - } - } - - if len(errs) > 0 { - for _, e := range errs { - t.Logf("Test %q failed: %v", k, e) - t.Fail() - } - } - } -} diff --git a/builtin/provisioners/habitat/resource_provisioner.go b/builtin/provisioners/habitat/resource_provisioner.go deleted file mode 100644 index 87534a6f6..000000000 --- a/builtin/provisioners/habitat/resource_provisioner.go +++ /dev/null @@ -1,572 +0,0 @@ -package habitat - -import ( - "context" - "crypto/sha256" - "errors" - "fmt" - "io" - "net/url" - "strings" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/communicator" - 
"github.com/hashicorp/terraform/communicator/remote" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/go-linereader" -) - -type provisioner struct { - Version string - AutoUpdate bool - HttpDisable bool - Services []Service - PermanentPeer bool - ListenCtl string - ListenGossip string - ListenHTTP string - Peer string - Peers []string - RingKey string - RingKeyContent string - CtlSecret string - SkipInstall bool - UseSudo bool - ServiceType string - ServiceName string - URL string - Channel string - Events string - Organization string - GatewayAuthToken string - BuilderAuthToken string - SupOptions string - AcceptLicense bool - - installHabitat provisionFn - startHabitat provisionFn - uploadRingKey provisionFn - uploadCtlSecret provisionFn - startHabitatService provisionServiceFn - - osType string -} - -type provisionFn func(terraform.UIOutput, communicator.Communicator) error -type provisionServiceFn func(terraform.UIOutput, communicator.Communicator, Service) error - -func Provisioner() terraform.ResourceProvisioner { - return &schema.Provisioner{ - Schema: map[string]*schema.Schema{ - "version": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "auto_update": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "http_disable": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "peer": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "peers": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - }, - "service_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "systemd", - ValidateFunc: validation.StringInSlice([]string{"systemd", "unmanaged"}, false), - }, - "service_name": &schema.Schema{ - Type: 
schema.TypeString, - Optional: true, - Default: "hab-supervisor", - }, - "use_sudo": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "accept_license": &schema.Schema{ - Type: schema.TypeBool, - Required: true, - }, - "permanent_peer": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "listen_ctl": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "listen_gossip": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "listen_http": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "ring_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "ring_key_content": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "ctl_secret": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "url": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: func(val interface{}, key string) (warns []string, errs []error) { - u, err := url.Parse(val.(string)) - if err != nil { - errs = append(errs, fmt.Errorf("invalid URL specified for %q: %v", key, err)) - } - - if u.Scheme == "" { - errs = append(errs, fmt.Errorf("invalid URL specified for %q (scheme must be specified)", key)) - } - - return warns, errs - }, - }, - "channel": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "events": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "organization": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "gateway_auth_token": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "builder_auth_token": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "service": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "binds": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{Type: 
schema.TypeString}, - Optional: true, - }, - "bind": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "alias": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "service": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "group": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - }, - Optional: true, - }, - "topology": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"leader", "standalone"}, false), - }, - "user_toml": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "strategy": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"none", "rolling", "at-once"}, false), - }, - "channel": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "group": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "url": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: func(val interface{}, key string) (warns []string, errs []error) { - u, err := url.Parse(val.(string)) - if err != nil { - errs = append(errs, fmt.Errorf("invalid URL specified for %q: %v", key, err)) - } - - if u.Scheme == "" { - errs = append(errs, fmt.Errorf("invalid URL specified for %q (scheme must be specified)", key)) - } - - return warns, errs - }, - }, - "application": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "environment": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "service_key": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - }, - Optional: true, - }, - }, - ApplyFunc: applyFn, - ValidateFunc: validateFn, - } -} - -func applyFn(ctx context.Context) error { - o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) - s := ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState) - d := 
ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) - - p, err := decodeConfig(d) - if err != nil { - return err - } - - // Automatically determine the OS type - switch t := s.Ephemeral.ConnInfo["type"]; t { - case "ssh", "": - p.osType = "linux" - case "winrm": - p.osType = "windows" - default: - return fmt.Errorf("unsupported connection type: %s", t) - } - - switch p.osType { - case "linux": - p.installHabitat = p.linuxInstallHabitat - p.uploadRingKey = p.linuxUploadRingKey - p.uploadCtlSecret = p.linuxUploadCtlSecret - p.startHabitat = p.linuxStartHabitat - p.startHabitatService = p.linuxStartHabitatService - case "windows": - return fmt.Errorf("windows is not supported yet for the habitat provisioner") - default: - return fmt.Errorf("unsupported os type: %s", p.osType) - } - - // Get a new communicator - comm, err := communicator.New(s) - if err != nil { - return err - } - - retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) - defer cancel() - - // Wait and retry until we establish the connection - err = communicator.Retry(retryCtx, func() error { - return comm.Connect(o) - }) - - if err != nil { - return err - } - defer comm.Disconnect() - - if !p.SkipInstall { - o.Output("Installing habitat...") - if err := p.installHabitat(o, comm); err != nil { - return err - } - } - - if p.RingKeyContent != "" { - o.Output("Uploading supervisor ring key...") - if err := p.uploadRingKey(o, comm); err != nil { - return err - } - } - - if p.CtlSecret != "" { - o.Output("Uploading ctl secret...") - if err := p.uploadCtlSecret(o, comm); err != nil { - return err - } - } - - o.Output("Starting the habitat supervisor...") - if err := p.startHabitat(o, comm); err != nil { - return err - } - - if p.Services != nil { - for _, service := range p.Services { - o.Output("Starting service: " + service.Name) - if err := p.startHabitatService(o, comm, service); err != nil { - return err - } - } - } - - return nil -} - -func validateFn(c *terraform.ResourceConfig) (ws 
[]string, es []error) { - ringKeyContent, ok := c.Get("ring_key_content") - if ok && ringKeyContent != "" && ringKeyContent != hcl2shim.UnknownVariableValue { - ringKey, ringOk := c.Get("ring_key") - if ringOk && ringKey == "" { - es = append(es, errors.New("if ring_key_content is specified, ring_key must be specified as well")) - } - } - - v, ok := c.Get("version") - if ok && v != nil && strings.TrimSpace(v.(string)) != "" { - if _, err := version.NewVersion(v.(string)); err != nil { - es = append(es, errors.New(v.(string)+" is not a valid version.")) - } - } - - acceptLicense, ok := c.Get("accept_license") - if ok && !acceptLicense.(bool) { - if v != nil && strings.TrimSpace(v.(string)) != "" { - versionOld, _ := version.NewVersion("0.79.0") - versionRequired, _ := version.NewVersion(v.(string)) - if versionRequired.GreaterThan(versionOld) { - es = append(es, errors.New("Habitat end user license agreement needs to be accepted, set the accept_license argument to true to accept")) - } - } else { // blank means latest version - es = append(es, errors.New("Habitat end user license agreement needs to be accepted, set the accept_license argument to true to accept")) - } - } - - // Validate service level configs - services, ok := c.Get("service") - if ok { - data, dataOk := services.(string) - if dataOk { - es = append(es, fmt.Errorf("service '%v': must be a block", data)) - } - } - - return ws, es -} - -type Service struct { - Name string - Strategy string - Topology string - Channel string - Group string - URL string - Binds []Bind - BindStrings []string - UserTOML string - AppName string - Environment string - ServiceGroupKey string -} - -func (s *Service) getPackageName(fullName string) string { - return strings.Split(fullName, "/")[1] -} - -func (s *Service) getServiceNameChecksum() string { - return fmt.Sprintf("%x", sha256.Sum256([]byte(s.Name))) -} - -type Bind struct { - Alias string - Service string - Group string -} - -func (b *Bind) toBindString() string { - 
return fmt.Sprintf("%s:%s.%s", b.Alias, b.Service, b.Group) -} - -func decodeConfig(d *schema.ResourceData) (*provisioner, error) { - p := &provisioner{ - Version: d.Get("version").(string), - AutoUpdate: d.Get("auto_update").(bool), - HttpDisable: d.Get("http_disable").(bool), - Peer: d.Get("peer").(string), - Peers: getPeers(d.Get("peers").([]interface{})), - Services: getServices(d.Get("service").(*schema.Set).List()), - UseSudo: d.Get("use_sudo").(bool), - AcceptLicense: d.Get("accept_license").(bool), - ServiceType: d.Get("service_type").(string), - ServiceName: d.Get("service_name").(string), - RingKey: d.Get("ring_key").(string), - RingKeyContent: d.Get("ring_key_content").(string), - CtlSecret: d.Get("ctl_secret").(string), - PermanentPeer: d.Get("permanent_peer").(bool), - ListenCtl: d.Get("listen_ctl").(string), - ListenGossip: d.Get("listen_gossip").(string), - ListenHTTP: d.Get("listen_http").(string), - URL: d.Get("url").(string), - Channel: d.Get("channel").(string), - Events: d.Get("events").(string), - Organization: d.Get("organization").(string), - BuilderAuthToken: d.Get("builder_auth_token").(string), - GatewayAuthToken: d.Get("gateway_auth_token").(string), - } - - return p, nil -} - -func getPeers(v []interface{}) []string { - peers := make([]string, 0, len(v)) - for _, rawPeerData := range v { - peers = append(peers, rawPeerData.(string)) - } - return peers -} - -func getServices(v []interface{}) []Service { - services := make([]Service, 0, len(v)) - for _, rawServiceData := range v { - serviceData := rawServiceData.(map[string]interface{}) - name := (serviceData["name"].(string)) - strategy := (serviceData["strategy"].(string)) - topology := (serviceData["topology"].(string)) - channel := (serviceData["channel"].(string)) - group := (serviceData["group"].(string)) - url := (serviceData["url"].(string)) - app := (serviceData["application"].(string)) - env := (serviceData["environment"].(string)) - userToml := 
(serviceData["user_toml"].(string)) - serviceGroupKey := (serviceData["service_key"].(string)) - var bindStrings []string - binds := getBinds(serviceData["bind"].(*schema.Set).List()) - for _, b := range serviceData["binds"].([]interface{}) { - bind, err := getBindFromString(b.(string)) - if err != nil { - return nil - } - binds = append(binds, bind) - } - - service := Service{ - Name: name, - Strategy: strategy, - Topology: topology, - Channel: channel, - Group: group, - URL: url, - UserTOML: userToml, - BindStrings: bindStrings, - Binds: binds, - AppName: app, - Environment: env, - ServiceGroupKey: serviceGroupKey, - } - services = append(services, service) - } - return services -} - -func getBinds(v []interface{}) []Bind { - binds := make([]Bind, 0, len(v)) - for _, rawBindData := range v { - bindData := rawBindData.(map[string]interface{}) - alias := bindData["alias"].(string) - service := bindData["service"].(string) - group := bindData["group"].(string) - bind := Bind{ - Alias: alias, - Service: service, - Group: group, - } - binds = append(binds, bind) - } - return binds -} - -func (p *provisioner) copyOutput(o terraform.UIOutput, r io.Reader) { - lr := linereader.New(r) - for line := range lr.Ch { - o.Output(line) - } -} - -func (p *provisioner) runCommand(o terraform.UIOutput, comm communicator.Communicator, command string) error { - outR, outW := io.Pipe() - errR, errW := io.Pipe() - - go p.copyOutput(o, outR) - go p.copyOutput(o, errR) - defer outW.Close() - defer errW.Close() - - cmd := &remote.Cmd{ - Command: command, - Stdout: outW, - Stderr: errW, - } - - if err := comm.Start(cmd); err != nil { - return fmt.Errorf("error executing command %q: %v", cmd.Command, err) - } - - if err := cmd.Wait(); err != nil { - return err - } - - return nil -} - -func getBindFromString(bind string) (Bind, error) { - t := strings.FieldsFunc(bind, func(d rune) bool { - switch d { - case ':', '.': - return true - } - return false - }) - if len(t) != 3 { - return Bind{}, 
errors.New("invalid bind specification: " + bind) - } - return Bind{Alias: t[0], Service: t[1], Group: t[2]}, nil -} diff --git a/builtin/provisioners/habitat/resource_provisioner_test.go b/builtin/provisioners/habitat/resource_provisioner_test.go deleted file mode 100644 index 054aa9c19..000000000 --- a/builtin/provisioners/habitat/resource_provisioner_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package habitat - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceProvisioner_impl(t *testing.T) { - var _ terraform.ResourceProvisioner = Provisioner() -} - -func TestProvisioner(t *testing.T) { - if err := Provisioner().(*schema.Provisioner).InternalValidate(); err != nil { - t.Fatalf("error: %s", err) - } -} - -func TestResourceProvisioner_Validate_good(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "peers": []interface{}{"1.2.3.4"}, - "version": "0.32.0", - "service_type": "systemd", - "accept_license": false, - }) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) > 0 { - t.Fatalf("Errors: %v", errs) - } -} - -func TestResourceProvisioner_Validate_bad(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "service_type": "invalidtype", - "url": "badurl", - }) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - // 3 errors, bad service_type, bad url, missing accept_license - if len(errs) != 3 { - t.Fatalf("Should have three errors, got %d", len(errs)) - } -} - -func TestResourceProvisioner_Validate_bad_service_config(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "accept_license": true, - "service": []interface{}{ - map[string]interface{}{ - "name": "core/foo", - "strategy": "bar", - "topology": "baz", - "url": "badurl", - }, - }, - }) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", 
warn) - } - if len(errs) != 3 { - t.Fatalf("Should have three errors, got %d", len(errs)) - } -} - -func TestResourceProvisioner_Validate_bad_service_definition(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "service": "core/vault", - }) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) != 3 { - t.Fatalf("Should have three errors, got %d", len(errs)) - } -} - -func testConfig(t *testing.T, c map[string]interface{}) *terraform.ResourceConfig { - return terraform.NewResourceConfigRaw(c) -} diff --git a/builtin/provisioners/local-exec/resource_provisioner.go b/builtin/provisioners/local-exec/resource_provisioner.go index f0ba28f34..8bf7d4c49 100644 --- a/builtin/provisioners/local-exec/resource_provisioner.go +++ b/builtin/provisioners/local-exec/resource_provisioner.go @@ -9,9 +9,10 @@ import ( "runtime" "github.com/armon/circbuf" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/provisioners" "github.com/mitchellh/go-linereader" + "github.com/zclconf/go-cty/cty" ) const ( @@ -21,59 +22,78 @@ const ( maxBufSize = 8 * 1024 ) -func Provisioner() terraform.ResourceProvisioner { - return &schema.Provisioner{ - Schema: map[string]*schema.Schema{ - "command": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "interpreter": &schema.Schema{ - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - }, - "working_dir": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "environment": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - }, - - ApplyFunc: applyFn, +func New() provisioners.Interface { + ctx, cancel := context.WithCancel(context.Background()) + return &provisioner{ + ctx: ctx, + cancel: cancel, } } -func applyFn(ctx context.Context) error { - data := 
ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) - o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) +type provisioner struct { + // We store a context here tied to the lifetime of the provisioner. + // This allows the Stop method to cancel any in-flight requests. + ctx context.Context + cancel context.CancelFunc +} - command := data.Get("command").(string) - if command == "" { - return fmt.Errorf("local-exec provisioner command must be a non-empty string") +func (p *provisioner) GetSchema() (resp provisioners.GetSchemaResponse) { + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "command": { + Type: cty.String, + Required: true, + }, + "interpreter": { + Type: cty.List(cty.String), + Optional: true, + }, + "working_dir": { + Type: cty.String, + Optional: true, + }, + "environment": { + Type: cty.Map(cty.String), + Optional: true, + }, + }, } - // Execute the command with env - environment := data.Get("environment").(map[string]interface{}) + resp.Provisioner = schema + return resp +} +func (p *provisioner) ValidateProvisionerConfig(req provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { + if _, err := p.GetSchema().Provisioner.CoerceValue(req.Config); err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + } + return resp +} + +func (p *provisioner) ProvisionResource(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + command := req.Config.GetAttr("command").AsString() + if command == "" { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("local-exec provisioner command must be a non-empty string")) + return resp + } + + envVal := req.Config.GetAttr("environment") var env []string - for k := range environment { - entry := fmt.Sprintf("%s=%s", k, environment[k].(string)) - env = append(env, entry) + + if !envVal.IsNull() { + for k, v := range envVal.AsValueMap() { + entry := fmt.Sprintf("%s=%s", k, 
v.AsString()) + env = append(env, entry) + } } // Execute the command using a shell - interpreter := data.Get("interpreter").([]interface{}) + intrVal := req.Config.GetAttr("interpreter") var cmdargs []string - if len(interpreter) > 0 { - for _, i := range interpreter { - if arg, ok := i.(string); ok { - cmdargs = append(cmdargs, arg) - } + if !intrVal.IsNull() && intrVal.LengthInt() > 0 { + for _, v := range intrVal.AsValueSlice() { + cmdargs = append(cmdargs, v.AsString()) } } else { if runtime.GOOS == "windows" { @@ -82,25 +102,30 @@ func applyFn(ctx context.Context) error { cmdargs = []string{"/bin/sh", "-c"} } } + cmdargs = append(cmdargs, command) - workingdir := data.Get("working_dir").(string) + workingdir := "" + if wdVal := req.Config.GetAttr("working_dir"); !wdVal.IsNull() { + workingdir = wdVal.AsString() + } - // Setup the reader that will read the output from the command. + // Set up the reader that will read the output from the command. // We use an os.Pipe so that the *os.File can be passed directly to the // process, and not rely on goroutines copying the data which may block. // See golang.org/issue/18874 pr, pw, err := os.Pipe() if err != nil { - return fmt.Errorf("failed to initialize pipe for output: %s", err) + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("failed to initialize pipe for output: %s", err)) + return resp } var cmdEnv []string cmdEnv = os.Environ() cmdEnv = append(cmdEnv, env...) - // Setup the command - cmd := exec.CommandContext(ctx, cmdargs[0], cmdargs[1:]...) + // Set up the command + cmd := exec.CommandContext(p.ctx, cmdargs[0], cmdargs[1:]...) cmd.Stderr = pw cmd.Stdout = pw // Dir specifies the working directory of the command. 
@@ -118,10 +143,10 @@ func applyFn(ctx context.Context) error { // copy the teed output to the UI output copyDoneCh := make(chan struct{}) - go copyOutput(o, tee, copyDoneCh) + go copyUIOutput(req.UIOutput, tee, copyDoneCh) // Output what we're about to run - o.Output(fmt.Sprintf("Executing: %q", cmdargs)) + req.UIOutput.Output(fmt.Sprintf("Executing: %q", cmdargs)) // Start the command err = cmd.Start() @@ -138,18 +163,28 @@ func applyFn(ctx context.Context) error { // copyOutput goroutine will just hang out until exit. select { case <-copyDoneCh: - case <-ctx.Done(): + case <-p.ctx.Done(): } if err != nil { - return fmt.Errorf("Error running command '%s': %v. Output: %s", - command, err, output.Bytes()) + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("Error running command '%s': %v. Output: %s", + command, err, output.Bytes())) + return resp } + return resp +} + +func (p *provisioner) Stop() error { + p.cancel() return nil } -func copyOutput(o terraform.UIOutput, r io.Reader, doneCh chan<- struct{}) { +func (p *provisioner) Close() error { + return nil +} + +func copyUIOutput(o provisioners.UIOutput, r io.Reader, doneCh chan<- struct{}) { defer close(doneCh) lr := linereader.New(r) for line := range lr.Ch { diff --git a/builtin/provisioners/local-exec/resource_provisioner_test.go b/builtin/provisioners/local-exec/resource_provisioner_test.go index 8718d4dab..235e69646 100644 --- a/builtin/provisioners/local-exec/resource_provisioner_test.go +++ b/builtin/provisioners/local-exec/resource_provisioner_test.go @@ -7,31 +7,30 @@ import ( "testing" "time" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/provisioners" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" ) -func TestResourceProvisioner_impl(t *testing.T) { - var _ terraform.ResourceProvisioner = Provisioner() -} - -func TestProvisioner(t *testing.T) { - if err := 
Provisioner().(*schema.Provisioner).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - func TestResourceProvider_Apply(t *testing.T) { defer os.Remove("test_out") - c := testConfig(t, map[string]interface{}{ - "command": "echo foo > test_out", + output := cli.NewMockUi() + p := New() + schema := p.GetSchema().Provisioner + c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "command": cty.StringVal("echo foo > test_out"), + })) + if err != nil { + t.Fatal(err) + } + + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: c, + UIOutput: output, }) - output := new(terraform.MockUIOutput) - p := Provisioner() - - if err := p.Apply(output, nil, c); err != nil { - t.Fatalf("err: %v", err) + if resp.Diagnostics.HasErrors() { + t.Fatalf("err: %v", resp.Diagnostics.Err()) } // Check the file @@ -48,14 +47,18 @@ func TestResourceProvider_Apply(t *testing.T) { } func TestResourceProvider_stop(t *testing.T) { - c := testConfig(t, map[string]interface{}{ + output := cli.NewMockUi() + p := New() + schema := p.GetSchema().Provisioner + + c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ // bash/zsh/ksh will exec a single command in the same process. This // makes certain there's a subprocess in the shell. - "command": "sleep 30; sleep 30", - }) - - output := new(terraform.MockUIOutput) - p := Provisioner() + "command": cty.StringVal("sleep 30; sleep 30"), + })) + if err != nil { + t.Fatal(err) + } doneCh := make(chan struct{}) startTime := time.Now() @@ -65,7 +68,10 @@ func TestResourceProvider_stop(t *testing.T) { // Because p.Apply is called in a goroutine, trying to t.Fatal() on its // result would be ignored or would cause a panic if the parent goroutine // has already completed. 
- _ = p.Apply(output, nil, c) + _ = p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: c, + UIOutput: output, + }) }() mustExceed := (50 * time.Millisecond) @@ -90,51 +96,32 @@ func TestResourceProvider_stop(t *testing.T) { } } -func TestResourceProvider_Validate_good(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "command": "echo foo", - }) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) > 0 { - t.Fatalf("Errors: %v", errs) - } -} - -func TestResourceProvider_Validate_missing(t *testing.T) { - c := testConfig(t, map[string]interface{}{}) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) == 0 { - t.Fatalf("Should have errors") - } -} - -func testConfig(t *testing.T, c map[string]interface{}) *terraform.ResourceConfig { - return terraform.NewResourceConfigRaw(c) -} - func TestResourceProvider_ApplyCustomInterpreter(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "interpreter": []interface{}{"echo", "is"}, - "command": "not really an interpreter", - }) + output := cli.NewMockUi() + p := New() - output := new(terraform.MockUIOutput) - p := Provisioner() + schema := p.GetSchema().Provisioner - if err := p.Apply(output, nil, c); err != nil { - t.Fatalf("err: %v", err) + c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "interpreter": cty.ListVal([]cty.Value{cty.StringVal("echo"), cty.StringVal("is")}), + "command": cty.StringVal("not really an interpreter"), + })) + if err != nil { + t.Fatal(err) } - got := strings.TrimSpace(output.OutputMessage) - want := "is not really an interpreter" + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: c, + UIOutput: output, + }) + + if resp.Diagnostics.HasErrors() { + t.Fatal(resp.Diagnostics.Err()) + } + + got := strings.TrimSpace(output.OutputWriter.String()) + want := `Executing: ["echo" "is" "not really 
an interpreter"] +is not really an interpreter` if got != want { t.Errorf("wrong output\ngot: %s\nwant: %s", got, want) } @@ -145,16 +132,25 @@ func TestResourceProvider_ApplyCustomWorkingDirectory(t *testing.T) { os.Mkdir(testdir, 0755) defer os.Remove(testdir) - c := testConfig(t, map[string]interface{}{ - "working_dir": testdir, - "command": "echo `pwd`", + output := cli.NewMockUi() + p := New() + schema := p.GetSchema().Provisioner + + c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "working_dir": cty.StringVal(testdir), + "command": cty.StringVal("echo `pwd`"), + })) + if err != nil { + t.Fatal(err) + } + + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: c, + UIOutput: output, }) - output := new(terraform.MockUIOutput) - p := Provisioner() - - if err := p.Apply(output, nil, c); err != nil { - t.Fatalf("err: %v", err) + if resp.Diagnostics.HasErrors() { + t.Fatal(resp.Diagnostics.Err()) } dir, err := os.Getwd() @@ -162,33 +158,49 @@ func TestResourceProvider_ApplyCustomWorkingDirectory(t *testing.T) { t.Fatalf("err: %v", err) } - got := strings.TrimSpace(output.OutputMessage) - want := dir + "/" + testdir + got := strings.TrimSpace(output.OutputWriter.String()) + want := "Executing: [\"/bin/sh\" \"-c\" \"echo `pwd`\"]\n" + dir + "/" + testdir if got != want { t.Errorf("wrong output\ngot: %s\nwant: %s", got, want) } } func TestResourceProvider_ApplyCustomEnv(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "command": "echo $FOO $BAR $BAZ", - "environment": map[string]interface{}{ - "FOO": "BAR", - "BAR": 1, - "BAZ": "true", - }, - }) + output := cli.NewMockUi() + p := New() + schema := p.GetSchema().Provisioner - output := new(terraform.MockUIOutput) - p := Provisioner() - - if err := p.Apply(output, nil, c); err != nil { - t.Fatalf("err: %v", err) + c, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "command": cty.StringVal("echo $FOO $BAR $BAZ"), + "environment": 
cty.MapVal(map[string]cty.Value{ + "FOO": cty.StringVal("BAR"), + "BAR": cty.StringVal("1"), + "BAZ": cty.StringVal("true"), + }), + })) + if err != nil { + t.Fatal(err) } - got := strings.TrimSpace(output.OutputMessage) - want := "BAR 1 true" + resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: c, + UIOutput: output, + }) + if resp.Diagnostics.HasErrors() { + t.Fatal(resp.Diagnostics.Err()) + } + + got := strings.TrimSpace(output.OutputWriter.String()) + want := `Executing: ["/bin/sh" "-c" "echo $FOO $BAR $BAZ"] +BAR 1 true` if got != want { t.Errorf("wrong output\ngot: %s\nwant: %s", got, want) } } + +// Validate that Stop can Close can be called even when not provisioning. +func TestResourceProvisioner_StopClose(t *testing.T) { + p := New() + p.Stop() + p.Close() +} diff --git a/builtin/provisioners/puppet/bolt/bolt.go b/builtin/provisioners/puppet/bolt/bolt.go deleted file mode 100644 index 0fc70e32e..000000000 --- a/builtin/provisioners/puppet/bolt/bolt.go +++ /dev/null @@ -1,74 +0,0 @@ -package bolt - -import ( - "context" - "encoding/json" - "fmt" - "os/exec" - "runtime" - "strings" - "time" -) - -type Result struct { - Items []struct { - Node string `json:"node"` - Status string `json:"status"` - Result map[string]string `json:"result"` - } `json:"items"` - NodeCount int `json:"node_count"` - ElapsedTime int `json:"elapsed_time"` -} - -func runCommand(command string, timeout time.Duration) ([]byte, error) { - var cmdargs []string - - if runtime.GOOS == "windows" { - cmdargs = []string{"cmd", "/C"} - } else { - cmdargs = []string{"/bin/sh", "-c"} - } - cmdargs = append(cmdargs, command) - - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - cmd := exec.CommandContext(ctx, cmdargs[0], cmdargs[1:]...) 
- return cmd.Output() -} - -func Task(connInfo map[string]string, timeout time.Duration, sudo bool, task string, args map[string]string) (*Result, error) { - cmdargs := []string{ - "bolt", "task", "run", "--nodes", connInfo["type"] + "://" + connInfo["host"], "-u", connInfo["user"], - } - - if connInfo["type"] == "winrm" { - cmdargs = append(cmdargs, "-p", "\""+connInfo["password"]+"\"", "--no-ssl") - } else { - if sudo { - cmdargs = append(cmdargs, "--run-as", "root") - } - - cmdargs = append(cmdargs, "--no-host-key-check") - } - - cmdargs = append(cmdargs, "--format", "json", "--connect-timeout", "120", task) - - if args != nil { - for key, value := range args { - cmdargs = append(cmdargs, strings.Join([]string{key, value}, "=")) - } - } - - out, err := runCommand(strings.Join(cmdargs, " "), timeout) - if err != nil { - return nil, fmt.Errorf("Bolt: \"%s\": %s: %s", strings.Join(cmdargs, " "), out, err) - } - - result := new(Result) - if err = json.Unmarshal(out, result); err != nil { - return nil, err - } - - return result, nil -} diff --git a/builtin/provisioners/puppet/linux_provisioner.go b/builtin/provisioners/puppet/linux_provisioner.go deleted file mode 100644 index f480b0e72..000000000 --- a/builtin/provisioners/puppet/linux_provisioner.go +++ /dev/null @@ -1,65 +0,0 @@ -package puppet - -import ( - "fmt" - "io" - - "github.com/hashicorp/terraform/communicator/remote" -) - -func (p *provisioner) linuxUploadFile(f io.Reader, dir string, filename string) error { - _, err := p.runCommand("mkdir -p " + dir) - if err != nil { - return fmt.Errorf("Failed to make directory %s: %s", dir, err) - } - - err = p.comm.Upload("/tmp/"+filename, f) - if err != nil { - return fmt.Errorf("Failed to upload %s to /tmp: %s", filename, err) - } - - _, err = p.runCommand(fmt.Sprintf("mv /tmp/%s %s/%s", filename, dir, filename)) - return err -} - -func (p *provisioner) linuxDefaultCertname() (string, error) { - certname, err := p.runCommand("hostname -f") - if err != nil { - 
return "", err - } - - return certname, nil -} - -func (p *provisioner) linuxInstallPuppetAgent() error { - _, err := p.runCommand(fmt.Sprintf("curl -kO https://%s:8140/packages/current/install.bash", p.Server)) - if err != nil { - return err - } - - _, err = p.runCommand("bash -- ./install.bash --puppet-service-ensure stopped") - if err != nil { - return err - } - - _, err = p.runCommand("rm -f install.bash") - return err -} - -func (p *provisioner) linuxRunPuppetAgent() error { - _, err := p.runCommand(fmt.Sprintf( - "/opt/puppetlabs/puppet/bin/puppet agent --test --server %s --environment %s", - p.Server, - p.Environment, - )) - - // Puppet exits 2 if changes have been successfully made. - if err != nil { - errStruct, _ := err.(*remote.ExitError) - if errStruct.ExitStatus == 2 { - return nil - } - } - - return err -} diff --git a/builtin/provisioners/puppet/linux_provisioner_test.go b/builtin/provisioners/puppet/linux_provisioner_test.go deleted file mode 100644 index 828d66708..000000000 --- a/builtin/provisioners/puppet/linux_provisioner_test.go +++ /dev/null @@ -1,379 +0,0 @@ -package puppet - -import ( - "io" - "strings" - "testing" - - "github.com/hashicorp/terraform/communicator" - "github.com/hashicorp/terraform/communicator/remote" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceProvisioner_linuxUploadFile(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - Commands map[string]bool - CommandFunc func(*remote.Cmd) error - ExpectedError bool - Uploads map[string]string - File io.Reader - Dir string - Filename string - }{ - "Successful upload": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - Commands: map[string]bool{ - "mkdir -p /etc/puppetlabs/puppet": true, - "mv /tmp/csr_attributes.yaml /etc/puppetlabs/puppet/csr_attributes.yaml": true, - }, - Uploads: map[string]string{ - "/tmp/csr_attributes.yaml": "", - }, - 
Dir: "/etc/puppetlabs/puppet", - Filename: "csr_attributes.yaml", - File: strings.NewReader(""), - }, - "Failure when creating the directory": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - Commands: map[string]bool{ - "mkdir -p /etc/puppetlabs/puppet": true, - }, - Dir: "/etc/puppetlabs/puppet", - Filename: "csr_attributes.yaml", - File: strings.NewReader(""), - CommandFunc: func(r *remote.Cmd) error { - r.SetExitStatus(1, &remote.ExitError{ - Command: "mkdir -p /etc/puppetlabs/puppet", - ExitStatus: 1, - Err: nil, - }) - return nil - }, - ExpectedError: true, - }, - } - - for k, tc := range cases { - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - c := new(communicator.MockCommunicator) - c.Commands = tc.Commands - c.Uploads = tc.Uploads - if tc.CommandFunc != nil { - c.CommandFunc = tc.CommandFunc - } - p.comm = c - p.output = new(terraform.MockUIOutput) - - err = p.linuxUploadFile(tc.File, tc.Dir, tc.Filename) - if tc.ExpectedError { - if err == nil { - t.Fatalf("Expected error, but no error returned") - } - } else { - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } - } -} - -func TestResourceProvisioner_linuxDefaultCertname(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - Commands map[string]bool - CommandFunc func(*remote.Cmd) error - ExpectedError bool - }{ - "No sudo": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - Commands: map[string]bool{ - "hostname -f": true, - }, - }, - "With sudo": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": true, - }, - Commands: map[string]bool{ - "sudo hostname -f": true, - }, - }, - "Failed execution": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - Commands: map[string]bool{ - 
"hostname -f": true, - }, - CommandFunc: func(r *remote.Cmd) error { - if r.Command == "hostname -f" { - r.SetExitStatus(1, &remote.ExitError{ - Command: "hostname -f", - ExitStatus: 1, - Err: nil, - }) - } else { - r.SetExitStatus(0, nil) - } - return nil - }, - ExpectedError: true, - }, - } - - for k, tc := range cases { - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - c := new(communicator.MockCommunicator) - c.Commands = tc.Commands - if tc.CommandFunc != nil { - c.CommandFunc = tc.CommandFunc - } - p.comm = c - p.output = new(terraform.MockUIOutput) - - _, err = p.linuxDefaultCertname() - if tc.ExpectedError { - if err == nil { - t.Fatalf("Expected error, but no error returned") - } - } else { - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } - } -} - -func TestResourceProvisioner_linuxInstallPuppetAgent(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - Commands map[string]bool - CommandFunc func(*remote.Cmd) error - ExpectedError bool - }{ - "Everything runs succcessfully": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - Commands: map[string]bool{ - "curl -kO https://puppet.test.com:8140/packages/current/install.bash": true, - "bash -- ./install.bash --puppet-service-ensure stopped": true, - "rm -f install.bash": true, - }, - }, - "Respects the use_sudo config flag": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": true, - }, - Commands: map[string]bool{ - "sudo curl -kO https://puppet.test.com:8140/packages/current/install.bash": true, - "sudo bash -- ./install.bash --puppet-service-ensure stopped": true, - "sudo rm -f install.bash": true, - }, - }, - "When the curl command fails": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - Commands: map[string]bool{ - "curl 
-kO https://puppet.test.com:8140/packages/current/install.bash": true, - "bash -- ./install.bash --puppet-service-ensure stopped": false, - "rm -f install.bash": false, - }, - CommandFunc: func(r *remote.Cmd) error { - if r.Command == "curl -kO https://puppet.test.com:8140/packages/current/install.bash" { - r.SetExitStatus(1, &remote.ExitError{ - Command: "curl -kO https://puppet.test.com:8140/packages/current/install.bash", - ExitStatus: 1, - Err: nil, - }) - } else { - r.SetExitStatus(0, nil) - } - return nil - }, - ExpectedError: true, - }, - "When the install script fails": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - Commands: map[string]bool{ - "curl -kO https://puppet.test.com:8140/packages/current/install.bash": true, - "bash -- ./install.bash --puppet-service-ensure stopped": true, - "rm -f install.bash": false, - }, - CommandFunc: func(r *remote.Cmd) error { - if r.Command == "bash -- ./install.bash --puppet-service-ensure stopped" { - r.SetExitStatus(1, &remote.ExitError{ - Command: "bash -- ./install.bash --puppet-service-ensure stopped", - ExitStatus: 1, - Err: nil, - }) - } else { - r.SetExitStatus(0, nil) - } - return nil - }, - ExpectedError: true, - }, - "When the cleanup rm fails": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - Commands: map[string]bool{ - "curl -kO https://puppet.test.com:8140/packages/current/install.bash": true, - "bash -- ./install.bash --puppet-service-ensure stopped": true, - "rm -f install.bash": true, - }, - CommandFunc: func(r *remote.Cmd) error { - if r.Command == "rm -f install.bash" { - r.SetExitStatus(1, &remote.ExitError{ - Command: "rm -f install.bash", - ExitStatus: 1, - Err: nil, - }) - } else { - r.SetExitStatus(0, nil) - } - return nil - }, - ExpectedError: true, - }, - } - - for k, tc := range cases { - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - 
) - if err != nil { - t.Fatalf("Error: %v", err) - } - - c := new(communicator.MockCommunicator) - c.Commands = tc.Commands - if tc.CommandFunc != nil { - c.CommandFunc = tc.CommandFunc - } - p.comm = c - p.output = new(terraform.MockUIOutput) - - err = p.linuxInstallPuppetAgent() - if tc.ExpectedError { - if err == nil { - t.Fatalf("Expected error, but no error returned") - } - } else { - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } - } -} - -func TestResourceProvisioner_linuxRunPuppetAgent(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - Commands map[string]bool - CommandFunc func(*remote.Cmd) error - ExpectedError bool - }{ - "When puppet returns 0": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - Commands: map[string]bool{ - "/opt/puppetlabs/puppet/bin/puppet agent --test --server puppet.test.com --environment production": true, - }, - }, - "When puppet returns 2 (changes applied without error)": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - CommandFunc: func(r *remote.Cmd) error { - r.SetExitStatus(2, &remote.ExitError{ - Command: "/opt/puppetlabs/puppet/bin/puppet agent --test --server puppet.test.com", - ExitStatus: 2, - Err: nil, - }) - return nil - }, - ExpectedError: false, - }, - "When puppet returns something not 0 or 2": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - CommandFunc: func(r *remote.Cmd) error { - r.SetExitStatus(1, &remote.ExitError{ - Command: "/opt/puppetlabs/puppet/bin/puppet agent --test --server puppet.test.com", - ExitStatus: 1, - Err: nil, - }) - return nil - }, - ExpectedError: true, - }, - } - - for k, tc := range cases { - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - c := new(communicator.MockCommunicator) - 
c.Commands = tc.Commands - if tc.CommandFunc != nil { - c.CommandFunc = tc.CommandFunc - } - p.comm = c - p.output = new(terraform.MockUIOutput) - - err = p.linuxRunPuppetAgent() - if tc.ExpectedError { - if err == nil { - t.Fatalf("Expected error, but no error returned") - } - } else { - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } - } -} diff --git a/builtin/provisioners/puppet/resource_provisioner.go b/builtin/provisioners/puppet/resource_provisioner.go deleted file mode 100644 index 70a99cdd2..000000000 --- a/builtin/provisioners/puppet/resource_provisioner.go +++ /dev/null @@ -1,359 +0,0 @@ -package puppet - -import ( - "bytes" - "context" - "fmt" - "io" - "strings" - "sync" - "time" - - "github.com/hashicorp/terraform/builtin/provisioners/puppet/bolt" - "github.com/hashicorp/terraform/communicator" - "github.com/hashicorp/terraform/communicator/remote" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/helper/validation" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/go-linereader" - "gopkg.in/yaml.v2" -) - -type provisioner struct { - Server string - ServerUser string - OSType string - Certname string - Environment string - Autosign bool - OpenSource bool - UseSudo bool - BoltTimeout time.Duration - CustomAttributes map[string]interface{} - ExtensionRequests map[string]interface{} - - runPuppetAgent func() error - installPuppetAgent func() error - uploadFile func(f io.Reader, dir string, filename string) error - defaultCertname func() (string, error) - - instanceState *terraform.InstanceState - output terraform.UIOutput - comm communicator.Communicator - - outputWG sync.WaitGroup -} - -type csrAttributes struct { - CustomAttributes map[string]string `yaml:"custom_attributes"` - ExtensionRequests map[string]string `yaml:"extension_requests"` -} - -// Provisioner returns a Puppet resource provisioner. 
-func Provisioner() terraform.ResourceProvisioner { - return &schema.Provisioner{ - Schema: map[string]*schema.Schema{ - "server": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "server_user": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "root", - }, - "os_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{"linux", "windows"}, false), - }, - "use_sudo": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "autosign": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "open_source": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "certname": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "extension_requests": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - "custom_attributes": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - }, - "environment": &schema.Schema{ - Type: schema.TypeString, - Default: "production", - Optional: true, - }, - "bolt_timeout": &schema.Schema{ - Type: schema.TypeString, - Default: "5m", - Optional: true, - ValidateFunc: func(val interface{}, key string) (warns []string, errs []error) { - _, err := time.ParseDuration(val.(string)) - if err != nil { - errs = append(errs, err) - } - return warns, errs - }, - }, - }, - ApplyFunc: applyFn, - } -} - -func applyFn(ctx context.Context) error { - output := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) - state := ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState) - configData := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) - - p, err := decodeConfig(configData) - if err != nil { - return err - } - - p.instanceState = state - p.output = output - - if p.OSType == "" { - switch connType := state.Ephemeral.ConnInfo["type"]; connType { - case "ssh", "": // The default connection type is ssh, so if the type is empty 
assume ssh - p.OSType = "linux" - case "winrm": - p.OSType = "windows" - default: - return fmt.Errorf("Unsupported connection type: %s", connType) - } - } - - switch p.OSType { - case "linux": - p.runPuppetAgent = p.linuxRunPuppetAgent - p.installPuppetAgent = p.linuxInstallPuppetAgent - p.uploadFile = p.linuxUploadFile - p.defaultCertname = p.linuxDefaultCertname - case "windows": - p.runPuppetAgent = p.windowsRunPuppetAgent - p.installPuppetAgent = p.windowsInstallPuppetAgent - p.uploadFile = p.windowsUploadFile - p.UseSudo = false - p.defaultCertname = p.windowsDefaultCertname - default: - return fmt.Errorf("Unsupported OS type: %s", p.OSType) - } - - comm, err := communicator.New(state) - if err != nil { - return err - } - - retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) - defer cancel() - - err = communicator.Retry(retryCtx, func() error { - return comm.Connect(output) - }) - if err != nil { - return err - } - defer comm.Disconnect() - - p.comm = comm - - if p.OpenSource { - p.installPuppetAgent = p.installPuppetAgentOpenSource - } - - csrAttrs := new(csrAttributes) - csrAttrs.CustomAttributes = make(map[string]string) - for k, v := range p.CustomAttributes { - csrAttrs.CustomAttributes[k] = v.(string) - } - - csrAttrs.ExtensionRequests = make(map[string]string) - for k, v := range p.ExtensionRequests { - csrAttrs.ExtensionRequests[k] = v.(string) - } - - if p.Autosign { - if p.Certname == "" { - p.Certname, _ = p.defaultCertname() - } - - autosignToken, err := p.generateAutosignToken(p.Certname) - if err != nil { - return fmt.Errorf("Failed to generate an autosign token: %s", err) - } - csrAttrs.CustomAttributes["challengePassword"] = autosignToken - } - - if err = p.writeCSRAttributes(csrAttrs); err != nil { - return fmt.Errorf("Failed to write csr_attributes.yaml: %s", err) - } - - if err = p.installPuppetAgent(); err != nil { - return err - } - - if err = p.runPuppetAgent(); err != nil { - return err - } - - return nil -} - -func (p 
*provisioner) writeCSRAttributes(attrs *csrAttributes) (rerr error) { - content, err := yaml.Marshal(attrs) - if err != nil { - return fmt.Errorf("Failed to marshal CSR attributes to YAML: %s", err) - } - - configDir := map[string]string{ - "linux": "/etc/puppetlabs/puppet", - "windows": "C:\\ProgramData\\PuppetLabs\\Puppet\\etc", - } - - return p.uploadFile(bytes.NewBuffer(content), configDir[p.OSType], "csr_attributes.yaml") -} - -func (p *provisioner) generateAutosignToken(certname string) (string, error) { - task := "autosign::generate_token" - - masterConnInfo := map[string]string{ - "type": "ssh", - "host": p.Server, - "user": p.ServerUser, - } - - result, err := bolt.Task( - masterConnInfo, - p.BoltTimeout, - p.ServerUser != "root", - task, - map[string]string{"certname": certname}, - ) - if err != nil { - return "", err - } - - if result.Items[0].Status != "success" { - return "", fmt.Errorf("Bolt %s failed on %s: %v", - task, - result.Items[0].Node, - result.Items[0].Result["_error"], - ) - } - - return result.Items[0].Result["_output"], nil -} - -func (p *provisioner) installPuppetAgentOpenSource() error { - task := "puppet_agent::install" - - connType := p.instanceState.Ephemeral.ConnInfo["type"] - if connType == "" { - connType = "ssh" - } - - agentConnInfo := map[string]string{ - "type": connType, - "host": p.instanceState.Ephemeral.ConnInfo["host"], - "user": p.instanceState.Ephemeral.ConnInfo["user"], - "password": p.instanceState.Ephemeral.ConnInfo["password"], // Required on Windows only - } - - result, err := bolt.Task( - agentConnInfo, - p.BoltTimeout, - p.UseSudo, - task, - nil, - ) - - if err != nil || result.Items[0].Status != "success" { - return fmt.Errorf("%s failed: %s\n%+v", task, err, result) - } - - return nil -} - -func (p *provisioner) runCommand(command string) (stdout string, err error) { - if p.UseSudo { - command = "sudo " + command - } - - var stdoutBuffer bytes.Buffer - outR, outW := io.Pipe() - errR, errW := io.Pipe() - outTee 
:= io.TeeReader(outR, &stdoutBuffer) - - p.outputWG.Add(2) - go p.copyToOutput(outTee) - go p.copyToOutput(errR) - - defer outW.Close() - defer errW.Close() - - cmd := &remote.Cmd{ - Command: command, - Stdout: outW, - Stderr: errW, - } - - err = p.comm.Start(cmd) - if err != nil { - err = fmt.Errorf("Error executing command %q: %v", cmd.Command, err) - return stdout, err - } - - err = cmd.Wait() - - outW.Close() - errW.Close() - p.outputWG.Wait() - - stdout = strings.TrimSpace(stdoutBuffer.String()) - - return stdout, err -} - -func (p *provisioner) copyToOutput(reader io.Reader) { - defer p.outputWG.Done() - - lr := linereader.New(reader) - for line := range lr.Ch { - p.output.Output(line) - } -} - -func decodeConfig(d *schema.ResourceData) (*provisioner, error) { - p := &provisioner{ - UseSudo: d.Get("use_sudo").(bool), - Server: d.Get("server").(string), - ServerUser: d.Get("server_user").(string), - OSType: strings.ToLower(d.Get("os_type").(string)), - Autosign: d.Get("autosign").(bool), - OpenSource: d.Get("open_source").(bool), - Certname: strings.ToLower(d.Get("certname").(string)), - ExtensionRequests: d.Get("extension_requests").(map[string]interface{}), - CustomAttributes: d.Get("custom_attributes").(map[string]interface{}), - Environment: d.Get("environment").(string), - } - p.BoltTimeout, _ = time.ParseDuration(d.Get("bolt_timeout").(string)) - - return p, nil -} diff --git a/builtin/provisioners/puppet/resource_provisioner_test.go b/builtin/provisioners/puppet/resource_provisioner_test.go deleted file mode 100644 index 4a5cde032..000000000 --- a/builtin/provisioners/puppet/resource_provisioner_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package puppet - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceProvisioner_impl(t *testing.T) { - var _ terraform.ResourceProvisioner = Provisioner() -} - -func TestProvisioner(t *testing.T) { - if err := 
Provisioner().(*schema.Provisioner).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvisioner_Validate_good_server(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "server": "puppet.test.com", - }) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) > 0 { - t.Fatalf("Errors: %v", errs) - } -} - -func TestProvisioner_Validate_bad_no_server(t *testing.T) { - c := testConfig(t, map[string]interface{}{}) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) == 0 { - t.Fatalf("Should have errors") - } -} - -func TestProvisioner_Validate_bad_os_type(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "server": "puppet.test.com", - "os_type": "OS/2", - }) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) == 0 { - t.Fatalf("Should have errors") - } -} - -func TestProvisioner_Validate_good_os_type_linux(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "server": "puppet.test.com", - "os_type": "linux", - }) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) > 0 { - t.Fatalf("Errors: %v", errs) - } -} - -func TestProvisioner_Validate_good_os_type_windows(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "server": "puppet.test.com", - "os_type": "windows", - }) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) > 0 { - t.Fatalf("Errors: %v", errs) - } -} - -func TestProvisioner_Validate_bad_bolt_timeout(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "server": "puppet.test.com", - "bolt_timeout": "123oeau", - }) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) == 0 { - t.Fatalf("Should have 
errors") - } -} - -func TestProvisioner_Validate_good_bolt_timeout(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "server": "puppet.test.com", - "bolt_timeout": "123m", - }) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) > 0 { - t.Fatalf("Errors: %v", warn) - } -} - -func testConfig(t *testing.T, c map[string]interface{}) *terraform.ResourceConfig { - return terraform.NewResourceConfigRaw(c) -} diff --git a/builtin/provisioners/puppet/windows_provisioner.go b/builtin/provisioners/puppet/windows_provisioner.go deleted file mode 100644 index eeb2154e4..000000000 --- a/builtin/provisioners/puppet/windows_provisioner.go +++ /dev/null @@ -1,71 +0,0 @@ -package puppet - -import ( - "fmt" - "io" - "strings" - - "github.com/hashicorp/terraform/communicator/remote" -) - -const ( - getHostByName = "([System.Net.Dns]::GetHostByName(($env:computerName))).Hostname" - domainQuery = "(Get-WmiObject -Query 'select DNSDomain from Win32_NetworkAdapterConfiguration where IPEnabled = True').DNSDomain" -) - -func (p *provisioner) windowsUploadFile(f io.Reader, dir string, filename string) error { - _, err := p.runCommand("powershell.exe new-item -itemtype directory -force -path " + dir) - if err != nil { - return fmt.Errorf("Failed to make directory %s: %s", dir, err) - } - - return p.comm.Upload(dir+"\\"+filename, f) -} - -func (p *provisioner) windowsDefaultCertname() (string, error) { - certname, err := p.runCommand(fmt.Sprintf(`powershell -Command "& {%s}"`, getHostByName)) - if err != nil { - return "", err - } - - // Sometimes System.Net.Dns::GetHostByName does not return a full FQDN, so - // we have to look up the domain separately. - if strings.Contains(certname, ".") { - return certname, nil - } - - domain, err := p.runCommand(fmt.Sprintf(`powershell -Command "& {%s}"`, domainQuery)) - if err != nil { - return "", err - } - - return strings.ToLower(certname + "." 
+ domain), nil -} - -func (p *provisioner) windowsInstallPuppetAgent() error { - _, err := p.runCommand(fmt.Sprintf( - `powershell -Command "& {[Net.ServicePointManager]::ServerCertificateValidationCallback = {$true}; `+ - `(New-Object System.Net.WebClient).DownloadFile('https://%s:8140/packages/current/install.ps1', `+ - `'install.ps1')}"`, - p.Server, - )) - if err != nil { - return err - } - - _, err = p.runCommand(`powershell -Command "& .\install.ps1 -PuppetServiceEnsure stopped"`) - - return err -} - -func (p *provisioner) windowsRunPuppetAgent() error { - _, err := p.runCommand(fmt.Sprintf("puppet agent --test --server %s --environment %s", p.Server, p.Environment)) - if err != nil { - errStruct, _ := err.(*remote.ExitError) - if errStruct.ExitStatus == 2 { - return nil - } - } - - return err -} diff --git a/builtin/provisioners/puppet/windows_provisioner_test.go b/builtin/provisioners/puppet/windows_provisioner_test.go deleted file mode 100644 index 5c823ab51..000000000 --- a/builtin/provisioners/puppet/windows_provisioner_test.go +++ /dev/null @@ -1,393 +0,0 @@ -package puppet - -import ( - "fmt" - "io" - "strings" - "testing" - "time" - - "github.com/hashicorp/terraform/communicator" - "github.com/hashicorp/terraform/communicator/remote" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -const ( - getHostByNameCmd = `powershell -Command "& {([System.Net.Dns]::GetHostByName(($env:computerName))).Hostname}"` - domainQueryCmd = `powershell -Command "& {(Get-WmiObject -Query 'select DNSDomain from Win32_NetworkAdapterConfiguration where IPEnabled = True').DNSDomain}"` - downloadInstallerCmd = `powershell -Command "& {[Net.ServicePointManager]::ServerCertificateValidationCallback = {$true}; (New-Object System.Net.WebClient).DownloadFile('https://puppet.test.com:8140/packages/current/install.ps1', 'install.ps1')}"` - runInstallerCmd = `powershell -Command "& .\install.ps1 -PuppetServiceEnsure stopped"` - 
runPuppetCmd = "puppet agent --test --server puppet.test.com --environment production" -) - -func TestResourceProvisioner_windowsUploadFile(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - Commands map[string]bool - CommandFunc func(*remote.Cmd) error - ExpectedError bool - Uploads map[string]string - File io.Reader - Dir string - Filename string - }{ - "Successful upload": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - Commands: map[string]bool{ - `powershell.exe new-item -itemtype directory -force -path C:\ProgramData\PuppetLabs\puppet\etc`: true, - }, - Uploads: map[string]string{ - `C:\ProgramData\PuppetLabs\puppet\etc\csr_attributes.yaml`: "", - }, - Dir: `C:\ProgramData\PuppetLabs\puppet\etc`, - Filename: "csr_attributes.yaml", - File: strings.NewReader(""), - }, - "Failure when creating the directory": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - Commands: map[string]bool{ - `powershell.exe new-item -itemtype directory -force -path C:\ProgramData\PuppetLabs\puppet\etc`: true, - }, - Dir: `C:\ProgramData\PuppetLabs\puppet\etc`, - Filename: "csr_attributes.yaml", - File: strings.NewReader(""), - CommandFunc: func(r *remote.Cmd) error { - r.SetExitStatus(1, &remote.ExitError{ - Command: `powershell.exe new-item -itemtype directory -force -path C:\ProgramData\PuppetLabs\puppet\etc`, - ExitStatus: 1, - Err: nil, - }) - return nil - }, - ExpectedError: true, - }, - } - - for k, tc := range cases { - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - c := new(communicator.MockCommunicator) - c.Commands = tc.Commands - c.Uploads = tc.Uploads - if tc.CommandFunc != nil { - c.CommandFunc = tc.CommandFunc - } - p.comm = c - p.output = new(terraform.MockUIOutput) - - err = p.windowsUploadFile(tc.File, tc.Dir, tc.Filename) - 
if tc.ExpectedError { - if err == nil { - t.Fatalf("Expected error, but no error returned") - } - } else { - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } - } -} - -func TestResourceProvisioner_windowsDefaultCertname(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - Commands map[string]bool - CommandFunc func(*remote.Cmd) error - ExpectedError bool - }{ - "GetHostByName failure": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - CommandFunc: func(r *remote.Cmd) error { - switch r.Command { - case getHostByNameCmd: - r.SetExitStatus(1, &remote.ExitError{ - Command: getHostByNameCmd, - ExitStatus: 1, - Err: nil, - }) - default: - return fmt.Errorf("Command not found!") - } - - return nil - }, - ExpectedError: true, - }, - "GetHostByName returns FQDN": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - CommandFunc: func(r *remote.Cmd) error { - switch r.Command { - case getHostByNameCmd: - r.Stdout.Write([]byte("example.test.com\n")) - time.Sleep(200 * time.Millisecond) - r.SetExitStatus(0, nil) - default: - return fmt.Errorf("Command not found!") - } - - return nil - }, - }, - "GetHostByName returns hostname, DNSDomain query succeeds": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - CommandFunc: func(r *remote.Cmd) error { - switch r.Command { - case getHostByNameCmd: - r.Stdout.Write([]byte("example\n")) - time.Sleep(200 * time.Millisecond) - r.SetExitStatus(0, nil) - case domainQueryCmd: - r.Stdout.Write([]byte("test.com\n")) - time.Sleep(200 * time.Millisecond) - r.SetExitStatus(0, nil) - default: - return fmt.Errorf("Command not found!") - } - - return nil - }, - }, - "GetHostByName returns hostname, DNSDomain query fails": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - CommandFunc: func(r *remote.Cmd) error { - switch r.Command { - 
case getHostByNameCmd: - r.Stdout.Write([]byte("example\n")) - time.Sleep(200 * time.Millisecond) - r.SetExitStatus(0, nil) - case domainQueryCmd: - r.SetExitStatus(1, &remote.ExitError{ - Command: domainQueryCmd, - ExitStatus: 1, - Err: nil, - }) - default: - return fmt.Errorf("Command not found!") - } - - return nil - }, - ExpectedError: true, - }, - } - - for k, tc := range cases { - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - c := new(communicator.MockCommunicator) - c.Commands = tc.Commands - if tc.CommandFunc != nil { - c.CommandFunc = tc.CommandFunc - } - p.comm = c - p.output = new(terraform.MockUIOutput) - - _, err = p.windowsDefaultCertname() - if tc.ExpectedError { - if err == nil { - t.Fatalf("Expected error, but no error returned") - } - } else { - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } - } -} - -func TestResourceProvisioner_windowsInstallPuppetAgent(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - Commands map[string]bool - CommandFunc func(*remote.Cmd) error - ExpectedError bool - }{ - "Everything runs succcessfully": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - Commands: map[string]bool{ - downloadInstallerCmd: true, - runInstallerCmd: true, - }, - }, - "Installer download fails": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": true, - }, - CommandFunc: func(r *remote.Cmd) error { - switch r.Command { - case downloadInstallerCmd: - r.SetExitStatus(1, &remote.ExitError{ - Command: downloadInstallerCmd, - ExitStatus: 1, - Err: nil, - }) - default: - return fmt.Errorf("Command not found!") - } - - return nil - }, - ExpectedError: true, - }, - "Install script fails": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - CommandFunc: func(r 
*remote.Cmd) error { - switch r.Command { - case downloadInstallerCmd: - r.SetExitStatus(0, nil) - case runInstallerCmd: - r.SetExitStatus(1, &remote.ExitError{ - Command: runInstallerCmd, - ExitStatus: 1, - Err: nil, - }) - default: - return fmt.Errorf("Command not found!") - } - - return nil - }, - ExpectedError: true, - }, - } - - for k, tc := range cases { - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - c := new(communicator.MockCommunicator) - c.Commands = tc.Commands - if tc.CommandFunc != nil { - c.CommandFunc = tc.CommandFunc - } - p.comm = c - p.output = new(terraform.MockUIOutput) - - err = p.windowsInstallPuppetAgent() - if tc.ExpectedError { - if err == nil { - t.Fatalf("Expected error, but no error returned") - } - } else { - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } - } -} - -func TestResourceProvisioner_windowsRunPuppetAgent(t *testing.T) { - cases := map[string]struct { - Config map[string]interface{} - Commands map[string]bool - CommandFunc func(*remote.Cmd) error - ExpectedError bool - }{ - "When puppet returns 0": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - Commands: map[string]bool{ - runPuppetCmd: true, - }, - }, - "When puppet returns 2 (changes applied without error)": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - CommandFunc: func(r *remote.Cmd) error { - r.SetExitStatus(2, &remote.ExitError{ - Command: runPuppetCmd, - ExitStatus: 2, - Err: nil, - }) - return nil - }, - }, - "When puppet returns something not 0 or 2": { - Config: map[string]interface{}{ - "server": "puppet.test.com", - "use_sudo": false, - }, - CommandFunc: func(r *remote.Cmd) error { - r.SetExitStatus(1, &remote.ExitError{ - Command: runPuppetCmd, - ExitStatus: 1, - Err: nil, - }) - return nil - }, - ExpectedError: true, - }, - } 
- - for k, tc := range cases { - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, tc.Config), - ) - if err != nil { - t.Fatalf("Error: %v", err) - } - - c := new(communicator.MockCommunicator) - c.Commands = tc.Commands - if tc.CommandFunc != nil { - c.CommandFunc = tc.CommandFunc - } - p.comm = c - p.output = new(terraform.MockUIOutput) - - err = p.windowsRunPuppetAgent() - if tc.ExpectedError { - if err == nil { - t.Fatalf("Expected error, but no error returned") - } - } else { - if err != nil { - t.Fatalf("Test %q failed: %v", k, err) - } - } - } -} diff --git a/builtin/provisioners/remote-exec/resource_provisioner.go b/builtin/provisioners/remote-exec/resource_provisioner.go index 50042977e..4311ba164 100644 --- a/builtin/provisioners/remote-exec/resource_provisioner.go +++ b/builtin/provisioners/remote-exec/resource_provisioner.go @@ -3,92 +3,131 @@ package remoteexec import ( "bytes" "context" + "errors" "fmt" "io" "io/ioutil" "log" "os" "strings" - "time" "github.com/hashicorp/terraform/communicator" "github.com/hashicorp/terraform/communicator/remote" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/provisioners" "github.com/mitchellh/go-linereader" + "github.com/zclconf/go-cty/cty" ) -// maxBackoffDealy is the maximum delay between retry attempts -var maxBackoffDelay = 10 * time.Second -var initialBackoffDelay = time.Second - -func Provisioner() terraform.ResourceProvisioner { - return &schema.Provisioner{ - Schema: map[string]*schema.Schema{ - "inline": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - PromoteSingle: true, - Optional: true, - ConflictsWith: []string{"script", "scripts"}, - }, - - "script": { - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"inline", "scripts"}, - }, - - "scripts": { - Type: schema.TypeList, 
- Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - ConflictsWith: []string{"script", "inline"}, - }, - }, - - ApplyFunc: applyFn, +func New() provisioners.Interface { + ctx, cancel := context.WithCancel(context.Background()) + return &provisioner{ + ctx: ctx, + cancel: cancel, } } -// Apply executes the remote exec provisioner -func applyFn(ctx context.Context) error { - connState := ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState) - data := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) - o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) +type provisioner struct { + // We store a context here tied to the lifetime of the provisioner. + // This allows the Stop method to cancel any in-flight requests. + ctx context.Context + cancel context.CancelFunc +} - // Get a new communicator - comm, err := communicator.New(connState) +func (p *provisioner) GetSchema() (resp provisioners.GetSchemaResponse) { + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "inline": { + Type: cty.List(cty.String), + Optional: true, + }, + "script": { + Type: cty.String, + Optional: true, + }, + "scripts": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + } + + resp.Provisioner = schema + return resp +} + +func (p *provisioner) ValidateProvisionerConfig(req provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { + cfg, err := p.GetSchema().Provisioner.CoerceValue(req.Config) if err != nil { - return err + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + inline := cfg.GetAttr("inline") + script := cfg.GetAttr("script") + scripts := cfg.GetAttr("scripts") + + set := 0 + if !inline.IsNull() { + set++ + } + if !script.IsNull() { + set++ + } + if !scripts.IsNull() { + set++ + } + if set != 1 { + resp.Diagnostics = resp.Diagnostics.Append(errors.New( + `only one of "inline", "script", or "scripts" must be set`)) + } + return resp +} + 
+func (p *provisioner) ProvisionResource(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + comm, err := communicator.New(req.Connection) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp } // Collect the scripts - scripts, err := collectScripts(data) + scripts, err := collectScripts(req.Config) if err != nil { - return err + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp } for _, s := range scripts { defer s.Close() } // Copy and execute each script - if err := runScripts(ctx, o, comm, scripts); err != nil { - return err + if err := runScripts(p.ctx, req.UIOutput, comm, scripts); err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp } + return resp +} + +func (p *provisioner) Stop() error { + p.cancel() + return nil +} + +func (p *provisioner) Close() error { return nil } // generateScripts takes the configuration and creates a script from each inline config -func generateScripts(d *schema.ResourceData) ([]string, error) { +func generateScripts(inline cty.Value) ([]string, error) { var lines []string - for _, l := range d.Get("inline").([]interface{}) { - line, ok := l.(string) - if !ok { - return nil, fmt.Errorf("Error parsing %v as a string", l) + for _, l := range inline.AsValueSlice() { + s := l.AsString() + if s == "" { + return nil, errors.New("invalid empty string in 'scripts'") } - lines = append(lines, line) + lines = append(lines, s) } lines = append(lines, "") @@ -97,10 +136,10 @@ func generateScripts(d *schema.ResourceData) ([]string, error) { // collectScripts is used to collect all the scripts we need // to execute in preparation for copying them. 
-func collectScripts(d *schema.ResourceData) ([]io.ReadCloser, error) { +func collectScripts(v cty.Value) ([]io.ReadCloser, error) { // Check if inline - if _, ok := d.GetOk("inline"); ok { - scripts, err := generateScripts(d) + if inline := v.GetAttr("inline"); !inline.IsNull() { + scripts, err := generateScripts(inline) if err != nil { return nil, err } @@ -115,21 +154,21 @@ func collectScripts(d *schema.ResourceData) ([]io.ReadCloser, error) { // Collect scripts var scripts []string - if script, ok := d.GetOk("script"); ok { - scr, ok := script.(string) - if !ok { - return nil, fmt.Errorf("Error parsing script %v as string", script) + if script := v.GetAttr("script"); !script.IsNull() { + s := script.AsString() + if s == "" { + return nil, errors.New("invalid empty string in 'script'") } - scripts = append(scripts, scr) + scripts = append(scripts, s) } - if scriptList, ok := d.GetOk("scripts"); ok { - for _, script := range scriptList.([]interface{}) { - scr, ok := script.(string) - if !ok { - return nil, fmt.Errorf("Error parsing script %v as string", script) + if scriptList := v.GetAttr("scripts"); !scriptList.IsNull() { + for _, script := range scriptList.AsValueSlice() { + s := script.AsString() + if s == "" { + return nil, errors.New("invalid empty string in 'script'") } - scripts = append(scripts, scr) + scripts = append(scripts, script.AsString()) } } @@ -151,12 +190,7 @@ func collectScripts(d *schema.ResourceData) ([]io.ReadCloser, error) { } // runScripts is used to copy and execute a set of scripts -func runScripts( - ctx context.Context, - o terraform.UIOutput, - comm communicator.Communicator, - scripts []io.ReadCloser) error { - +func runScripts(ctx context.Context, o provisioners.UIOutput, comm communicator.Communicator, scripts []io.ReadCloser) error { retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) defer cancel() @@ -182,8 +216,8 @@ func runScripts( defer outW.Close() defer errW.Close() - go copyOutput(o, outR) - go copyOutput(o, 
errR) + go copyUIOutput(o, outR) + go copyUIOutput(o, errR) remotePath := comm.ScriptPath() @@ -216,8 +250,7 @@ func runScripts( return nil } -func copyOutput( - o terraform.UIOutput, r io.Reader) { +func copyUIOutput(o provisioners.UIOutput, r io.Reader) { lr := linereader.New(r) for line := range lr.Ch { o.Output(line) diff --git a/builtin/provisioners/remote-exec/resource_provisioner_test.go b/builtin/provisioners/remote-exec/resource_provisioner_test.go index cb865a8e3..16c115261 100644 --- a/builtin/provisioners/remote-exec/resource_provisioner_test.go +++ b/builtin/provisioners/remote-exec/resource_provisioner_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "io" + "log" "testing" "time" @@ -11,44 +12,33 @@ import ( "github.com/hashicorp/terraform/communicator" "github.com/hashicorp/terraform/communicator/remote" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/provisioners" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" ) -func TestResourceProvisioner_impl(t *testing.T) { - var _ terraform.ResourceProvisioner = Provisioner() -} - -func TestProvisioner(t *testing.T) { - if err := Provisioner().(*schema.Provisioner).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - func TestResourceProvider_Validate_good(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "inline": "echo foo", + c := cty.ObjectVal(map[string]cty.Value{ + "inline": cty.ListVal([]cty.Value{cty.StringVal("echo foo")}), }) - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) > 0 { - t.Fatalf("Errors: %v", errs) + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: c, + }) + if len(resp.Diagnostics) > 0 { + t.Fatal(resp.Diagnostics.ErrWithWarnings()) } } func TestResourceProvider_Validate_bad(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "invalid": 
"nope", + c := cty.ObjectVal(map[string]cty.Value{ + "invalid": cty.StringVal("nope"), }) - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) == 0 { + resp := New().ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: c, + }) + if !resp.Diagnostics.HasErrors() { t.Fatalf("Should have errors") } } @@ -59,17 +49,13 @@ exit 0 ` func TestResourceProvider_generateScript(t *testing.T) { - conf := map[string]interface{}{ - "inline": []interface{}{ - "cd /tmp", - "wget http://foobar", - "exit 0", - }, - } + inline := cty.ListVal([]cty.Value{ + cty.StringVal("cd /tmp"), + cty.StringVal("wget http://foobar"), + cty.StringVal("exit 0"), + }) - out, err := generateScripts( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, conf), - ) + out, err := generateScripts(inline) if err != nil { t.Fatalf("err: %v", err) } @@ -84,34 +70,28 @@ func TestResourceProvider_generateScript(t *testing.T) { } func TestResourceProvider_generateScriptEmptyInline(t *testing.T) { - p := Provisioner().(*schema.Provisioner) - conf := map[string]interface{}{ - "inline": []interface{}{""}, - } + inline := cty.ListVal([]cty.Value{cty.StringVal("")}) - _, err := generateScripts(schema.TestResourceDataRaw( - t, p.Schema, conf)) + _, err := generateScripts(inline) if err == nil { t.Fatal("expected error, got none") } - if !strings.Contains(err.Error(), "Error parsing") { - t.Fatalf("expected parsing error, got: %s", err) + if !strings.Contains(err.Error(), "empty string") { + t.Fatalf("expected empty string error, got: %s", err) } } func TestResourceProvider_CollectScripts_inline(t *testing.T) { - conf := map[string]interface{}{ - "inline": []interface{}{ - "cd /tmp", - "wget http://foobar", - "exit 0", - }, + conf := map[string]cty.Value{ + "inline": cty.ListVal([]cty.Value{ + cty.StringVal("cd /tmp"), + cty.StringVal("wget http://foobar"), + cty.StringVal("exit 0"), + }), } - scripts, err 
:= collectScripts( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, conf), - ) + scripts, err := collectScripts(cty.ObjectVal(conf)) if err != nil { t.Fatalf("err: %v", err) } @@ -132,13 +112,19 @@ func TestResourceProvider_CollectScripts_inline(t *testing.T) { } func TestResourceProvider_CollectScripts_script(t *testing.T) { - conf := map[string]interface{}{ - "script": "testdata/script1.sh", + p := New() + schema := p.GetSchema().Provisioner + + conf, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "scripts": cty.ListVal([]cty.Value{ + cty.StringVal("testdata/script1.sh"), + }), + })) + if err != nil { + t.Fatal(err) } - scripts, err := collectScripts( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, conf), - ) + scripts, err := collectScripts(conf) if err != nil { t.Fatalf("err: %v", err) } @@ -159,17 +145,21 @@ func TestResourceProvider_CollectScripts_script(t *testing.T) { } func TestResourceProvider_CollectScripts_scripts(t *testing.T) { - conf := map[string]interface{}{ - "scripts": []interface{}{ - "testdata/script1.sh", - "testdata/script1.sh", - "testdata/script1.sh", - }, + p := New() + schema := p.GetSchema().Provisioner + + conf, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "scripts": cty.ListVal([]cty.Value{ + cty.StringVal("testdata/script1.sh"), + cty.StringVal("testdata/script1.sh"), + cty.StringVal("testdata/script1.sh"), + }), + })) + if err != nil { + log.Fatal(err) } - scripts, err := collectScripts( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, conf), - ) + scripts, err := collectScripts(conf) if err != nil { t.Fatalf("err: %v", err) } @@ -192,25 +182,28 @@ func TestResourceProvider_CollectScripts_scripts(t *testing.T) { } func TestResourceProvider_CollectScripts_scriptsEmpty(t *testing.T) { - p := Provisioner().(*schema.Provisioner) - conf := map[string]interface{}{ - "scripts": []interface{}{""}, + p := New() + schema := 
p.GetSchema().Provisioner + + conf, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ + "scripts": cty.ListVal([]cty.Value{cty.StringVal("")}), + })) + if err != nil { + t.Fatal(err) } - _, err := collectScripts(schema.TestResourceDataRaw( - t, p.Schema, conf)) - + _, err = collectScripts(conf) if err == nil { t.Fatal("expected error") } - if !strings.Contains(err.Error(), "Error parsing") { - t.Fatalf("Expected parsing error, got: %s", err) + if !strings.Contains(err.Error(), "empty string") { + t.Fatalf("Expected empty string error, got: %s", err) } } func TestProvisionerTimeout(t *testing.T) { - o := new(terraform.MockUIOutput) + o := cli.NewMockUi() c := new(communicator.MockCommunicator) disconnected := make(chan struct{}) @@ -231,13 +224,11 @@ func TestProvisionerTimeout(t *testing.T) { c.UploadScripts = map[string]string{"hello": "echo hello"} c.RemoteScriptPath = "hello" - p := Provisioner().(*schema.Provisioner) - conf := map[string]interface{}{ - "inline": []interface{}{"echo hello"}, + conf := map[string]cty.Value{ + "inline": cty.ListVal([]cty.Value{cty.StringVal("echo hello")}), } - scripts, err := collectScripts(schema.TestResourceDataRaw( - t, p.Schema, conf)) + scripts, err := collectScripts(cty.ObjectVal(conf)) if err != nil { t.Fatal(err) } @@ -246,11 +237,10 @@ func TestProvisionerTimeout(t *testing.T) { done := make(chan struct{}) + var runErr error go func() { defer close(done) - if err := runScripts(ctx, o, c, scripts); err != nil { - t.Fatal(err) - } + runErr = runScripts(ctx, o, c, scripts) }() select { @@ -260,8 +250,14 @@ func TestProvisionerTimeout(t *testing.T) { } <-done + if runErr != nil { + t.Fatal(err) + } } -func testConfig(t *testing.T, c map[string]interface{}) *terraform.ResourceConfig { - return terraform.NewResourceConfigRaw(c) +// Validate that Stop can Close can be called even when not provisioning. 
+func TestResourceProvisioner_StopClose(t *testing.T) { + p := New() + p.Stop() + p.Close() } diff --git a/builtin/provisioners/salt-masterless/resource_provisioner.go b/builtin/provisioners/salt-masterless/resource_provisioner.go deleted file mode 100644 index 0be7ed008..000000000 --- a/builtin/provisioners/salt-masterless/resource_provisioner.go +++ /dev/null @@ -1,525 +0,0 @@ -// This package implements a provisioner for Terraform that executes a -// saltstack state within the remote machine -// -// Adapted from gitub.com/hashicorp/packer/provisioner/salt-masterless - -package saltmasterless - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "os" - "path/filepath" - - "github.com/hashicorp/terraform/communicator" - "github.com/hashicorp/terraform/communicator/remote" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" - linereader "github.com/mitchellh/go-linereader" -) - -type provisionFn func(terraform.UIOutput, communicator.Communicator) error - -type provisioner struct { - SkipBootstrap bool - BootstrapArgs string - LocalStateTree string - DisableSudo bool - CustomState string - MinionConfig string - LocalPillarRoots string - RemoteStateTree string - RemotePillarRoots string - TempConfigDir string - NoExitOnFailure bool - LogLevel string - SaltCallArgs string - CmdArgs string -} - -const DefaultStateTreeDir = "/srv/salt" -const DefaultPillarRootDir = "/srv/pillar" - -// Provisioner returns a salt-masterless provisioner -func Provisioner() terraform.ResourceProvisioner { - return &schema.Provisioner{ - Schema: map[string]*schema.Schema{ - "local_state_tree": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "local_pillar_roots": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "remote_state_tree": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: DefaultStateTreeDir, - }, - "remote_pillar_roots": &schema.Schema{ - Type: schema.TypeString, - 
Optional: true, - Default: DefaultPillarRootDir, - }, - "temp_config_dir": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "/tmp/salt", - }, - "skip_bootstrap": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "no_exit_on_failure": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "bootstrap_args": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "disable_sudo": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - "custom_state": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "minion_config_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "cmd_args": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "salt_call_args": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "log_level": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - - ApplyFunc: applyFn, - ValidateFunc: validateFn, - } -} - -// Apply executes the file provisioner -func applyFn(ctx context.Context) error { - // Decode the raw config for this provisioner - o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) - d := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) - connState := ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState) - - p, err := decodeConfig(d) - if err != nil { - return err - } - - // Get a new communicator - comm, err := communicator.New(connState) - if err != nil { - return err - } - - retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) - defer cancel() - - // Wait and retry until we establish the connection - err = communicator.Retry(retryCtx, func() error { - return comm.Connect(o) - }) - - if err != nil { - return err - } - - // Wait for the context to end and then disconnect - go func() { - <-ctx.Done() - comm.Disconnect() - }() - - var src, dst string - - o.Output("Provisioning with Salt...") - if !p.SkipBootstrap { - cmd := &remote.Cmd{ - // Fallback on wget 
if curl failed for any reason (such as not being installed) - Command: fmt.Sprintf("curl -L https://bootstrap.saltstack.com -o /tmp/install_salt.sh || wget -O /tmp/install_salt.sh https://bootstrap.saltstack.com"), - } - o.Output(fmt.Sprintf("Downloading saltstack bootstrap to /tmp/install_salt.sh")) - if err = comm.Start(cmd); err != nil { - err = fmt.Errorf("Unable to download Salt: %s", err) - } - - if err := cmd.Wait(); err != nil { - return err - } - - outR, outW := io.Pipe() - errR, errW := io.Pipe() - go copyOutput(o, outR) - go copyOutput(o, errR) - defer outW.Close() - defer errW.Close() - - cmd = &remote.Cmd{ - Command: fmt.Sprintf("%s /tmp/install_salt.sh %s", p.sudo("sh"), p.BootstrapArgs), - Stdout: outW, - Stderr: errW, - } - - o.Output(fmt.Sprintf("Installing Salt with command %s", cmd.Command)) - if err := comm.Start(cmd); err != nil { - return fmt.Errorf("Unable to install Salt: %s", err) - } - - if err := cmd.Wait(); err != nil { - return err - } - } - - o.Output(fmt.Sprintf("Creating remote temporary directory: %s", p.TempConfigDir)) - if err := p.createDir(o, comm, p.TempConfigDir); err != nil { - return fmt.Errorf("Error creating remote temporary directory: %s", err) - } - - if p.MinionConfig != "" { - o.Output(fmt.Sprintf("Uploading minion config: %s", p.MinionConfig)) - src = p.MinionConfig - dst = filepath.ToSlash(filepath.Join(p.TempConfigDir, "minion")) - if err = p.uploadFile(o, comm, dst, src); err != nil { - return fmt.Errorf("Error uploading local minion config file to remote: %s", err) - } - - // move minion config into /etc/salt - o.Output(fmt.Sprintf("Make sure directory %s exists", "/etc/salt")) - if err := p.createDir(o, comm, "/etc/salt"); err != nil { - return fmt.Errorf("Error creating remote salt configuration directory: %s", err) - } - src = filepath.ToSlash(filepath.Join(p.TempConfigDir, "minion")) - dst = "/etc/salt/minion" - if err = p.moveFile(o, comm, dst, src); err != nil { - return fmt.Errorf("Unable to move %s/minion 
to /etc/salt/minion: %s", p.TempConfigDir, err) - } - } - - o.Output(fmt.Sprintf("Uploading local state tree: %s", p.LocalStateTree)) - src = p.LocalStateTree - dst = filepath.ToSlash(filepath.Join(p.TempConfigDir, "states")) - if err = p.uploadDir(o, comm, dst, src, []string{".git"}); err != nil { - return fmt.Errorf("Error uploading local state tree to remote: %s", err) - } - - // move state tree from temporary directory - src = filepath.ToSlash(filepath.Join(p.TempConfigDir, "states")) - dst = p.RemoteStateTree - if err = p.removeDir(o, comm, dst); err != nil { - return fmt.Errorf("Unable to clear salt tree: %s", err) - } - if err = p.moveFile(o, comm, dst, src); err != nil { - return fmt.Errorf("Unable to move %s/states to %s: %s", p.TempConfigDir, dst, err) - } - - if p.LocalPillarRoots != "" { - o.Output(fmt.Sprintf("Uploading local pillar roots: %s", p.LocalPillarRoots)) - src = p.LocalPillarRoots - dst = filepath.ToSlash(filepath.Join(p.TempConfigDir, "pillar")) - if err = p.uploadDir(o, comm, dst, src, []string{".git"}); err != nil { - return fmt.Errorf("Error uploading local pillar roots to remote: %s", err) - } - - // move pillar root from temporary directory - src = filepath.ToSlash(filepath.Join(p.TempConfigDir, "pillar")) - dst = p.RemotePillarRoots - - if err = p.removeDir(o, comm, dst); err != nil { - return fmt.Errorf("Unable to clear pillar root: %s", err) - } - if err = p.moveFile(o, comm, dst, src); err != nil { - return fmt.Errorf("Unable to move %s/pillar to %s: %s", p.TempConfigDir, dst, err) - } - } - - outR, outW := io.Pipe() - errR, errW := io.Pipe() - go copyOutput(o, outR) - go copyOutput(o, errR) - defer outW.Close() - defer errW.Close() - - o.Output(fmt.Sprintf("Running: salt-call --local %s", p.CmdArgs)) - cmd := &remote.Cmd{ - Command: p.sudo(fmt.Sprintf("salt-call --local %s", p.CmdArgs)), - Stdout: outW, - Stderr: errW, - } - if err = comm.Start(cmd); err != nil { - err = fmt.Errorf("Error executing salt-call: %s", err) - } - - if 
err := cmd.Wait(); err != nil { - return err - } - return nil -} - -// Prepends sudo to supplied command if config says to -func (p *provisioner) sudo(cmd string) string { - if p.DisableSudo { - return cmd - } - - return "sudo " + cmd -} - -func validateDirConfig(path string, name string, required bool) error { - if required == true && path == "" { - return fmt.Errorf("%s cannot be empty", name) - } else if required == false && path == "" { - return nil - } - info, err := os.Stat(path) - if err != nil { - return fmt.Errorf("%s: path '%s' is invalid: %s", name, path, err) - } else if !info.IsDir() { - return fmt.Errorf("%s: path '%s' must point to a directory", name, path) - } - return nil -} - -func validateFileConfig(path string, name string, required bool) error { - if required == true && path == "" { - return fmt.Errorf("%s cannot be empty", name) - } else if required == false && path == "" { - return nil - } - info, err := os.Stat(path) - if err != nil { - return fmt.Errorf("%s: path '%s' is invalid: %s", name, path, err) - } else if info.IsDir() { - return fmt.Errorf("%s: path '%s' must point to a file", name, path) - } - return nil -} - -func (p *provisioner) uploadFile(o terraform.UIOutput, comm communicator.Communicator, dst, src string) error { - f, err := os.Open(src) - if err != nil { - return fmt.Errorf("Error opening: %s", err) - } - defer f.Close() - - if err = comm.Upload(dst, f); err != nil { - return fmt.Errorf("Error uploading %s: %s", src, err) - } - return nil -} - -func (p *provisioner) moveFile(o terraform.UIOutput, comm communicator.Communicator, dst, src string) error { - o.Output(fmt.Sprintf("Moving %s to %s", src, dst)) - cmd := &remote.Cmd{Command: fmt.Sprintf(p.sudo("mv %s %s"), src, dst)} - if err := comm.Start(cmd); err != nil { - return fmt.Errorf("Unable to move %s to %s: %s", src, dst, err) - } - if err := cmd.Wait(); err != nil { - return err - } - return nil -} - -func (p *provisioner) createDir(o terraform.UIOutput, comm 
communicator.Communicator, dir string) error { - o.Output(fmt.Sprintf("Creating directory: %s", dir)) - cmd := &remote.Cmd{ - Command: fmt.Sprintf("mkdir -p '%s'", dir), - } - if err := comm.Start(cmd); err != nil { - return err - } - - if err := cmd.Wait(); err != nil { - return err - } - return nil -} - -func (p *provisioner) removeDir(o terraform.UIOutput, comm communicator.Communicator, dir string) error { - o.Output(fmt.Sprintf("Removing directory: %s", dir)) - cmd := &remote.Cmd{ - Command: fmt.Sprintf("rm -rf '%s'", dir), - } - if err := comm.Start(cmd); err != nil { - return err - } - if err := cmd.Wait(); err != nil { - return err - } - return nil -} - -func (p *provisioner) uploadDir(o terraform.UIOutput, comm communicator.Communicator, dst, src string, ignore []string) error { - if err := p.createDir(o, comm, dst); err != nil { - return err - } - - // Make sure there is a trailing "/" so that the directory isn't - // created on the other side. - if src[len(src)-1] != '/' { - src = src + "/" - } - return comm.UploadDir(dst, src) -} - -// Validate checks if the required arguments are configured -func validateFn(c *terraform.ResourceConfig) (ws []string, es []error) { - // require a salt state tree - localStateTreeTmp, ok := c.Get("local_state_tree") - var localStateTree string - if !ok { - es = append(es, - errors.New("Required local_state_tree is not set")) - } else { - localStateTree = localStateTreeTmp.(string) - } - err := validateDirConfig(localStateTree, "local_state_tree", true) - if err != nil { - es = append(es, err) - } - - var localPillarRoots string - localPillarRootsTmp, ok := c.Get("local_pillar_roots") - if !ok { - localPillarRoots = "" - } else { - localPillarRoots = localPillarRootsTmp.(string) - } - - err = validateDirConfig(localPillarRoots, "local_pillar_roots", false) - if err != nil { - es = append(es, err) - } - - var minionConfig string - minionConfigTmp, ok := c.Get("minion_config_file") - if !ok { - minionConfig = "" - } else { - 
minionConfig = minionConfigTmp.(string) - } - err = validateFileConfig(minionConfig, "minion_config_file", false) - if err != nil { - es = append(es, err) - } - - var remoteStateTree string - remoteStateTreeTmp, ok := c.Get("remote_state_tree") - if !ok { - remoteStateTree = DefaultStateTreeDir - } else { - remoteStateTree = remoteStateTreeTmp.(string) - } - - var remotePillarRoots string - remotePillarRootsTmp, ok := c.Get("remote_pillar_roots") - if !ok { - remotePillarRoots = DefaultPillarRootDir - } else { - remotePillarRoots = remotePillarRootsTmp.(string) - } - - if minionConfig != "" && (remoteStateTree != DefaultStateTreeDir || remotePillarRoots != DefaultPillarRootDir) { - es = append(es, - errors.New("remote_state_tree and remote_pillar_roots only apply when minion_config_file is not used")) - } - - if len(es) > 0 { - return ws, es - } - - return ws, es -} - -func decodeConfig(d *schema.ResourceData) (*provisioner, error) { - p := &provisioner{ - LocalStateTree: d.Get("local_state_tree").(string), - LogLevel: d.Get("log_level").(string), - SaltCallArgs: d.Get("salt_call_args").(string), - CmdArgs: d.Get("cmd_args").(string), - MinionConfig: d.Get("minion_config_file").(string), - CustomState: d.Get("custom_state").(string), - DisableSudo: d.Get("disable_sudo").(bool), - BootstrapArgs: d.Get("bootstrap_args").(string), - NoExitOnFailure: d.Get("no_exit_on_failure").(bool), - SkipBootstrap: d.Get("skip_bootstrap").(bool), - TempConfigDir: d.Get("temp_config_dir").(string), - RemotePillarRoots: d.Get("remote_pillar_roots").(string), - RemoteStateTree: d.Get("remote_state_tree").(string), - LocalPillarRoots: d.Get("local_pillar_roots").(string), - } - - // build the command line args to pass onto salt - var cmdArgs bytes.Buffer - - if p.CustomState == "" { - cmdArgs.WriteString(" state.highstate") - } else { - cmdArgs.WriteString(" state.sls ") - cmdArgs.WriteString(p.CustomState) - } - - if p.MinionConfig == "" { - // pass --file-root and --pillar-root if no 
minion_config_file is supplied - if p.RemoteStateTree != "" { - cmdArgs.WriteString(" --file-root=") - cmdArgs.WriteString(p.RemoteStateTree) - } else { - cmdArgs.WriteString(" --file-root=") - cmdArgs.WriteString(DefaultStateTreeDir) - } - if p.RemotePillarRoots != "" { - cmdArgs.WriteString(" --pillar-root=") - cmdArgs.WriteString(p.RemotePillarRoots) - } else { - cmdArgs.WriteString(" --pillar-root=") - cmdArgs.WriteString(DefaultPillarRootDir) - } - } - - if !p.NoExitOnFailure { - cmdArgs.WriteString(" --retcode-passthrough") - } - - if p.LogLevel == "" { - cmdArgs.WriteString(" -l info") - } else { - cmdArgs.WriteString(" -l ") - cmdArgs.WriteString(p.LogLevel) - } - - if p.SaltCallArgs != "" { - cmdArgs.WriteString(" ") - cmdArgs.WriteString(p.SaltCallArgs) - } - - p.CmdArgs = cmdArgs.String() - - return p, nil -} - -func copyOutput( - o terraform.UIOutput, r io.Reader) { - lr := linereader.New(r) - for line := range lr.Ch { - o.Output(line) - } -} diff --git a/builtin/provisioners/salt-masterless/resource_provisioner_test.go b/builtin/provisioners/salt-masterless/resource_provisioner_test.go deleted file mode 100644 index 10d11e561..000000000 --- a/builtin/provisioners/salt-masterless/resource_provisioner_test.go +++ /dev/null @@ -1,452 +0,0 @@ -package saltmasterless - -import ( - "io/ioutil" - "os" - "strings" - "testing" - - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/terraform" -) - -func testConfig(t *testing.T, c map[string]interface{}) *terraform.ResourceConfig { - return terraform.NewResourceConfigRaw(c) -} - -func TestResourceProvisioner_impl(t *testing.T) { - var _ terraform.ResourceProvisioner = Provisioner() -} - -func TestProvisioner(t *testing.T) { - if err := Provisioner().(*schema.Provisioner).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestResourceProvisioner_Validate_good(t *testing.T) { - dir, err := ioutil.TempDir("", "_terraform_saltmasterless_test") - if err != nil { - 
t.Fatalf("Error when creating temp dir: %v", err) - } - - defer os.RemoveAll(dir) // clean up - - c := testConfig(t, map[string]interface{}{ - "local_state_tree": dir, - }) - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) > 0 { - t.Fatalf("Errors: %v", errs) - } -} - -func TestResourceProvider_Validate_missing_required(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "remote_state_tree": "_default", - }) - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) == 0 { - t.Fatalf("Should have errors") - } -} - -func TestResourceProvider_Validate_LocalStateTree_doesnt_exist(t *testing.T) { - c := testConfig(t, map[string]interface{}{ - "local_state_tree": "/i/dont/exist", - }) - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) == 0 { - t.Fatalf("Should have errors") - } -} - -func TestResourceProvisioner_Validate_invalid(t *testing.T) { - dir, err := ioutil.TempDir("", "_terraform_saltmasterless_test") - if err != nil { - t.Fatalf("Error when creating temp dir: %v", err) - } - - defer os.RemoveAll(dir) // clean up - - c := testConfig(t, map[string]interface{}{ - "local_state_tree": dir, - "i_am_not_valid": "_invalid", - }) - - warn, errs := Provisioner().Validate(c) - if len(warn) > 0 { - t.Fatalf("Warnings: %v", warn) - } - if len(errs) == 0 { - t.Fatalf("Should have errors") - } -} - -func TestProvisionerPrepare_CustomState(t *testing.T) { - c := map[string]interface{}{ - "local_state_tree": "/tmp/local_state_tree", - } - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, c), - ) - - if err != nil { - t.Fatalf("Error: %v", err) - } - - if !strings.Contains(p.CmdArgs, "state.highstate") { - t.Fatal("CmdArgs should contain state.highstate") - } - - if err != nil { - t.Fatalf("err: %s", err) - } - - c = 
map[string]interface{}{ - "local_state_tree": "/tmp/local_state_tree", - "custom_state": "custom", - } - - p, err = decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, c), - ) - - if err != nil { - t.Fatalf("Error: %v", err) - } - - if !strings.Contains(p.CmdArgs, "state.sls custom") { - t.Fatal("CmdArgs should contain state.sls custom") - } - - if err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestProvisionerPrepare_MinionConfig(t *testing.T) { - dir, err := ioutil.TempDir("", "_terraform_saltmasterless_test") - if err != nil { - t.Fatalf("Error when creating temp dir: %v", err) - } - - defer os.RemoveAll(dir) // clean up - - c := testConfig(t, map[string]interface{}{ - "local_state_tree": dir, - "minion_config_file": "i/dont/exist", - }) - - warns, errs := Provisioner().Validate(c) - - if len(warns) > 0 { - t.Fatalf("Warnings: %v", warns) - } - if len(errs) == 0 { - t.Fatalf("Should have error") - } - - tf, err := ioutil.TempFile("", "minion") - if err != nil { - t.Fatalf("error tempfile: %s", err) - } - - defer os.Remove(tf.Name()) - - c = testConfig(t, map[string]interface{}{ - "local_state_tree": dir, - "minion_config_file": tf.Name(), - }) - - warns, errs = Provisioner().Validate(c) - - if len(warns) > 0 { - t.Fatalf("Warnings: %v", warns) - } - if len(errs) > 0 { - t.Fatalf("errs: %s", errs) - } -} - -func TestProvisionerPrepare_MinionConfig_RemoteStateTree(t *testing.T) { - dir, err := ioutil.TempDir("", "_terraform_saltmasterless_test") - if err != nil { - t.Fatalf("Error when creating temp dir: %v", err) - } - - c := testConfig(t, map[string]interface{}{ - "local_state_tree": dir, - "minion_config_file": "i/dont/exist", - "remote_state_tree": "i/dont/exist/remote_state_tree", - }) - - warns, errs := Provisioner().Validate(c) - if len(warns) > 0 { - t.Fatalf("Warnings: %v", warns) - } - if len(errs) == 0 { - t.Fatalf("Should be error") - } -} - -func TestProvisionerPrepare_MinionConfig_RemotePillarRoots(t 
*testing.T) { - dir, err := ioutil.TempDir("", "_terraform_saltmasterless_test") - if err != nil { - t.Fatalf("Error when creating temp dir: %v", err) - } - - c := testConfig(t, map[string]interface{}{ - "local_state_tree": dir, - "minion_config_file": "i/dont/exist", - "remote_pillar_roots": "i/dont/exist/remote_pillar_roots", - }) - - warns, errs := Provisioner().Validate(c) - if len(warns) > 0 { - t.Fatalf("Warnings: %v", warns) - } - if len(errs) == 0 { - t.Fatalf("Should be error") - } -} - -func TestProvisionerPrepare_LocalPillarRoots(t *testing.T) { - dir, err := ioutil.TempDir("", "_terraform_saltmasterless_test") - if err != nil { - t.Fatalf("Error when creating temp dir: %v", err) - } - - c := testConfig(t, map[string]interface{}{ - "local_state_tree": dir, - "minion_config_file": "i/dont/exist", - "local_pillar_roots": "i/dont/exist/local_pillar_roots", - }) - - warns, errs := Provisioner().Validate(c) - if len(warns) > 0 { - t.Fatalf("Warnings: %v", warns) - } - if len(errs) == 0 { - t.Fatalf("Should be error") - } -} - -func TestProvisionerSudo(t *testing.T) { - dir, err := ioutil.TempDir("", "_terraform_saltmasterless_test") - if err != nil { - t.Fatalf("Error when creating temp dir: %v", err) - } - - c := map[string]interface{}{ - "local_state_tree": dir, - } - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, c), - ) - - withSudo := p.sudo("echo hello") - if withSudo != "sudo echo hello" { - t.Fatalf("sudo command not generated correctly") - } - - c = map[string]interface{}{ - "local_state_tree": dir, - "disable_sudo": "true", - } - - p, err = decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, c), - ) - - if err != nil { - t.Fatalf("err: %s", err) - } - withoutSudo := p.sudo("echo hello") - if withoutSudo != "echo hello" { - t.Fatalf("sudo-less command not generated correctly") - } -} - -func TestProvisionerPrepare_RemoteStateTree(t *testing.T) { - dir, err := 
ioutil.TempDir("", "_terraform_saltmasterless_test") - if err != nil { - t.Fatalf("Error when creating temp dir: %v", err) - } - - c := map[string]interface{}{ - "local_state_tree": dir, - "remote_state_tree": "/remote_state_tree", - } - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, c), - ) - - if err != nil { - t.Fatalf("err: %s", err) - } - - if !strings.Contains(p.CmdArgs, "--file-root=/remote_state_tree") { - t.Fatal("--file-root should be set in CmdArgs") - } -} - -func TestProvisionerPrepare_RemotePillarRoots(t *testing.T) { - dir, err := ioutil.TempDir("", "_terraform_saltmasterless_test") - if err != nil { - t.Fatalf("Error when creating temp dir: %v", err) - } - - c := map[string]interface{}{ - "local_state_tree": dir, - "remote_pillar_roots": "/remote_pillar_roots", - } - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, c), - ) - - if err != nil { - t.Fatalf("err: %s", err) - } - - if !strings.Contains(p.CmdArgs, "--pillar-root=/remote_pillar_roots") { - t.Fatal("--pillar-root should be set in CmdArgs") - } -} - -func TestProvisionerPrepare_RemoteStateTree_Default(t *testing.T) { - dir, err := ioutil.TempDir("", "_terraform_saltmasterless_test") - if err != nil { - t.Fatalf("Error when creating temp dir: %v", err) - } - - c := map[string]interface{}{ - "local_state_tree": dir, - } - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, c), - ) - - if err != nil { - t.Fatalf("err: %s", err) - } - - if !strings.Contains(p.CmdArgs, "--file-root=/srv/salt") { - t.Fatal("--file-root should be set in CmdArgs") - } -} - -func TestProvisionerPrepare_RemotePillarRoots_Default(t *testing.T) { - dir, err := ioutil.TempDir("", "_terraform_saltmasterless_test") - if err != nil { - t.Fatalf("Error when creating temp dir: %v", err) - } - - c := map[string]interface{}{ - "local_state_tree": dir, - } - - p, err := 
decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, c), - ) - - if err != nil { - t.Fatalf("err: %s", err) - } - - if !strings.Contains(p.CmdArgs, "--pillar-root=/srv/pillar") { - t.Fatal("--pillar-root should be set in CmdArgs") - } -} - -func TestProvisionerPrepare_NoExitOnFailure(t *testing.T) { - dir, err := ioutil.TempDir("", "_terraform_saltmasterless_test") - if err != nil { - t.Fatalf("Error when creating temp dir: %v", err) - } - - c := map[string]interface{}{ - "local_state_tree": dir, - } - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, c), - ) - - if err != nil { - t.Fatalf("err: %s", err) - } - - if !strings.Contains(p.CmdArgs, "--retcode-passthrough") { - t.Fatal("--retcode-passthrough should be set in CmdArgs") - } - - c = map[string]interface{}{ - "no_exit_on_failure": true, - } - - p, err = decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, c), - ) - - if err != nil { - t.Fatalf("err: %s", err) - } - - if strings.Contains(p.CmdArgs, "--retcode-passthrough") { - t.Fatal("--retcode-passthrough should not be set in CmdArgs") - } -} - -func TestProvisionerPrepare_LogLevel(t *testing.T) { - dir, err := ioutil.TempDir("", "_terraform_saltmasterless_test") - if err != nil { - t.Fatalf("Error when creating temp dir: %v", err) - } - - c := map[string]interface{}{ - "local_state_tree": dir, - } - - p, err := decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, c), - ) - - if err != nil { - t.Fatalf("err: %s", err) - } - - if !strings.Contains(p.CmdArgs, "-l info") { - t.Fatal("-l info should be set in CmdArgs") - } - - c = map[string]interface{}{ - "log_level": "debug", - } - - p, err = decodeConfig( - schema.TestResourceDataRaw(t, Provisioner().(*schema.Provisioner).Schema, c), - ) - - if err != nil { - t.Fatalf("err: %s", err) - } - - if !strings.Contains(p.CmdArgs, "-l debug") { - 
t.Fatal("-l debug should be set in CmdArgs") - } -} diff --git a/command/012_config_upgrade.go b/command/012_config_upgrade.go index 6d2ff2fdb..641080fc0 100644 --- a/command/012_config_upgrade.go +++ b/command/012_config_upgrade.go @@ -1,7 +1,6 @@ package command import ( - "fmt" "strings" ) @@ -10,10 +9,10 @@ type ZeroTwelveUpgradeCommand struct { } func (c *ZeroTwelveUpgradeCommand) Run(args []string) int { - c.Ui.Output(fmt.Sprintf(` + c.Ui.Output(` The 0.12upgrade command has been removed. You must run this command with Terraform v0.12 to upgrade your configuration syntax before upgrading to the -current version.`)) +current version.`) return 0 } diff --git a/command/013_config_upgrade.go b/command/013_config_upgrade.go index 53d65a2e6..5442bef14 100644 --- a/command/013_config_upgrade.go +++ b/command/013_config_upgrade.go @@ -1,7 +1,6 @@ package command import ( - "fmt" "strings" ) @@ -12,10 +11,10 @@ type ZeroThirteenUpgradeCommand struct { } func (c *ZeroThirteenUpgradeCommand) Run(args []string) int { - c.Ui.Output(fmt.Sprintf(` + c.Ui.Output(` The 0.13upgrade command has been removed. 
You must run this command with Terraform v0.13 to upgrade your provider requirements before upgrading to the -current version.`)) +current version.`) return 0 } diff --git a/command/apply.go b/command/apply.go index 36b66f677..0b4df0af6 100644 --- a/command/apply.go +++ b/command/apply.go @@ -1,15 +1,13 @@ package command import ( - "bytes" "fmt" - "sort" "strings" - "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/repl" - "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/views" + "github.com/hashicorp/terraform/plans/planfile" "github.com/hashicorp/terraform/tfdiags" ) @@ -24,7 +22,7 @@ type ApplyCommand struct { } func (c *ApplyCommand) Run(args []string) int { - var destroyForce, refresh, autoApprove bool + var refresh, autoApprove bool args = c.Meta.process(args) cmdName := "apply" if c.Destroy { @@ -33,9 +31,6 @@ func (c *ApplyCommand) Run(args []string) int { cmdFlags := c.Meta.extendedFlagSet(cmdName) cmdFlags.BoolVar(&autoApprove, "auto-approve", false, "skip interactive approval of plan before applying") - if c.Destroy { - cmdFlags.BoolVar(&destroyForce, "force", false, "deprecated: same as auto-approve") - } cmdFlags.BoolVar(&refresh, "refresh", true, "refresh") cmdFlags.IntVar(&c.Meta.parallelism, "parallelism", DefaultParallelism, "parallelism") cmdFlags.StringVar(&c.Meta.statePath, "state", "", "path") @@ -45,12 +40,23 @@ func (c *ApplyCommand) Run(args []string) int { cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) return 1 } - var diags tfdiags.Diagnostics + diags := c.parseTargetFlags() + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } args = cmdFlags.Args() + var planPath string 
+ if len(args) > 0 { + planPath = args[0] + args = args[1:] + } + configPath, err := ModulePath(args) if err != nil { c.Ui.Error(err.Error()) @@ -63,15 +69,29 @@ func (c *ApplyCommand) Run(args []string) int { return 1 } - // Check if the path is a plan - planFile, err := c.PlanFile(configPath) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - if c.Destroy && planFile != nil { - c.Ui.Error(fmt.Sprintf("Destroy can't be called with a plan file.")) - return 1 + // Try to load plan if path is specified + var planFile *planfile.Reader + if planPath != "" { + planFile, err = c.PlanFile(planPath) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + // If the path doesn't look like a plan, both planFile and err will be + // nil. In that case, the user is probably trying to use the positional + // argument to specify a configuration path. Point them at -chdir. + if planFile == nil { + c.Ui.Error(fmt.Sprintf("Failed to load %q as a plan file. Did you mean to use -chdir?", planPath)) + return 1 + } + + // If we successfully loaded a plan but this is a destroy operation, + // explain that this is not supported. + if c.Destroy { + c.Ui.Error("Destroy can't be called with a plan file.") + return 1 + } } if planFile != nil { // Reset the config path for backend loading @@ -88,6 +108,10 @@ func (c *ApplyCommand) Run(args []string) int { } } + // Set up our count hook that keeps track of resource changes + countHook := new(CountHook) + c.ExtraHooks = append(c.ExtraHooks, countHook) + // Load the backend var be backend.Enhanced var beDiags tfdiags.Diagnostics @@ -118,7 +142,7 @@ func (c *ApplyCommand) Run(args []string) int { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, "Failed to read plan from plan file", - fmt.Sprintf("The given plan file does not have a valid backend configuration. This is a bug in the Terraform command that generated this plan file."), + "The given plan file does not have a valid backend configuration. 
This is a bug in the Terraform command that generated this plan file.", )) c.showDiagnostics(diags) return 1 @@ -134,7 +158,7 @@ func (c *ApplyCommand) Run(args []string) int { // Applying changes with dev overrides in effect could make it impossible // to switch back to a release version if the schema isn't compatible, // so we'll warn about it. - diags = diags.Append(c.providerDevOverrideWarnings()) + diags = diags.Append(c.providerDevOverrideRuntimeWarnings()) // Before we delegate to the backend, we'll print any warning diagnostics // we've accumulated here, since the backend will start fresh with its own @@ -147,7 +171,6 @@ func (c *ApplyCommand) Run(args []string) int { opReq.AutoApprove = autoApprove opReq.ConfigDir = configPath opReq.Destroy = c.Destroy - opReq.DestroyForce = destroyForce opReq.PlanFile = planFile opReq.PlanRefresh = refresh opReq.Type = backend.OperationTypeApply @@ -173,13 +196,44 @@ func (c *ApplyCommand) Run(args []string) int { c.showDiagnostics(err) return 1 } + if op.Result != backend.OperationSuccess { return op.Result.ExitStatus() } - if !c.Destroy { - if outputs := outputsAsString(op.State, addrs.RootModuleInstance, true); outputs != "" { - c.Ui.Output(c.Colorize().Color(outputs)) + // Show the count results from the operation + if c.Destroy { + c.Ui.Output(c.Colorize().Color(fmt.Sprintf( + "[reset][bold][green]\n"+ + "Destroy complete! Resources: %d destroyed.", + countHook.Removed))) + } else { + c.Ui.Output(c.Colorize().Color(fmt.Sprintf( + "[reset][bold][green]\n"+ + "Apply complete! Resources: %d added, %d changed, %d destroyed.", + countHook.Added, + countHook.Changed, + countHook.Removed))) + } + + // only show the state file help message if the state is local. + if (countHook.Added > 0 || countHook.Changed > 0) && c.Meta.stateOutPath != "" { + c.Ui.Output(c.Colorize().Color(fmt.Sprintf( + "[reset]\n"+ + "The state of your infrastructure has been saved to the path\n"+ + "below. 
This state is required to modify and destroy your\n"+ + "infrastructure, so keep it safe. To inspect the complete state\n"+ + "use the `terraform show` command.\n\n"+ + "State path: %s", + c.Meta.stateOutPath))) + } + + if !c.Destroy && op.State != nil { + outputValues := op.State.RootModule().OutputValues + if len(outputValues) > 0 { + c.Ui.Output(c.Colorize().Color("[reset][bold][green]\nOutputs:\n\n")) + view := views.NewOutput(arguments.ViewHuman, c.View) + view.Output("", outputValues) } } @@ -266,7 +320,7 @@ Options: func (c *ApplyCommand) helpDestroy() string { helpText := ` -Usage: terraform destroy [options] [DIR] +Usage: terraform destroy [options] Destroy Terraform-managed infrastructure. @@ -278,8 +332,6 @@ Options: -auto-approve Skip interactive approval before destroying. - -force Deprecated: same as auto-approve. - -lock=true Lock the state file when locking is supported. -lock-timeout=0s Duration to retry a state lock. @@ -315,49 +367,6 @@ Options: return strings.TrimSpace(helpText) } -func outputsAsString(state *states.State, modPath addrs.ModuleInstance, includeHeader bool) string { - if state == nil { - return "" - } - - ms := state.Module(modPath) - if ms == nil { - return "" - } - - outputs := ms.OutputValues - outputBuf := new(bytes.Buffer) - if len(outputs) > 0 { - if includeHeader { - outputBuf.WriteString("[reset][bold][green]\nOutputs:\n\n") - } - - // Output the outputs in alphabetical order - keyLen := 0 - ks := make([]string, 0, len(outputs)) - for key, _ := range outputs { - ks = append(ks, key) - if len(key) > keyLen { - keyLen = len(key) - } - } - sort.Strings(ks) - - for _, k := range ks { - v := outputs[k] - if v.Sensitive { - outputBuf.WriteString(fmt.Sprintf("%s = \n", k)) - continue - } - - result := repl.FormatValue(v.Value, 0) - outputBuf.WriteString(fmt.Sprintf("%s = %s\n", k, result)) - } - } - - return strings.TrimSpace(outputBuf.String()) -} - const outputInterrupt = `Interrupt received. 
Please wait for Terraform to exit or data loss may occur. Gracefully shutting down...` diff --git a/command/apply_destroy_test.go b/command/apply_destroy_test.go index 6acffdf75..e4f5d8429 100644 --- a/command/apply_destroy_test.go +++ b/command/apply_destroy_test.go @@ -1,6 +1,7 @@ package command import ( + "bytes" "os" "strings" "testing" @@ -14,10 +15,15 @@ import ( "github.com/hashicorp/terraform/providers" "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/states/statefile" - "github.com/hashicorp/terraform/terraform" ) func TestApply_destroy(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + originalState := states.BuildState(func(s *states.SyncState) { s.SetResourceInstanceCurrent( addrs.Resource{ @@ -38,23 +44,27 @@ func TestApply_destroy(t *testing.T) { statePath := testStateFile(t, originalState) p := testProvider() - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "ami": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, }, }, }, } ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Destroy: true, Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -62,7 +72,6 @@ func TestApply_destroy(t *testing.T) { args := []string{ "-auto-approve", "-state", statePath, - testFixturePath("apply"), } if code := c.Run(args); code != 0 { t.Log(ui.OutputWriter.String()) @@ -113,7 +122,134 @@ func TestApply_destroy(t *testing.T) { } } +func 
TestApply_destroyApproveNo(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + // Create some existing state + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + // Disable test mode so input would be asked + test = false + defer func() { test = true }() + + // Answer approval request with "no" + defaultInputReader = bytes.NewBufferString("no\n") + defaultInputWriter = new(bytes.Buffer) + + p := applyFixtureProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ApplyCommand{ + Destroy: true, + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + } + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + if got, want := ui.OutputWriter.String(), "Destroy cancelled"; !strings.Contains(got, want) { + t.Fatalf("expected output to include %q, but was:\n%s", want, got) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } + actualStr := strings.TrimSpace(state.String()) + expectedStr := strings.TrimSpace(originalState.String()) + if actualStr != expectedStr { + t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr) + } +} + +func TestApply_destroyApproveYes(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, 
testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := applyFixtureProvider() + + // Disable test mode so input would be asked + test = false + defer func() { test = true }() + + // Answer approval request with "yes" + defaultInputReader = bytes.NewBufferString("yes\n") + defaultInputWriter = new(bytes.Buffer) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ApplyCommand{ + Destroy: true, + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } + + actualStr := strings.TrimSpace(state.String()) + expectedStr := strings.TrimSpace(testApplyDestroyStr) + if actualStr != expectedStr { + t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr) + } +} + func TestApply_destroyLockedState(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + originalState := states.BuildState(func(s *states.SyncState) { s.SetResourceInstanceCurrent( addrs.Resource{ @@ -141,11 +277,13 @@ func TestApply_destroyLockedState(t *testing.T) { p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Destroy: true, Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -153,7 +291,6 @@ func TestApply_destroyLockedState(t *testing.T) { args := []string{ "-auto-approve", "-state", statePath, - testFixturePath("apply"), } if code := c.Run(args); code == 0 { @@ -167,15 +304,23 @@ func TestApply_destroyLockedState(t *testing.T) { } func TestApply_destroyPlan(t 
*testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + planPath := testPlanFileNoop(t) p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Destroy: true, Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -186,9 +331,55 @@ func TestApply_destroyPlan(t *testing.T) { if code := c.Run(args); code != 1 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } + output := ui.ErrorWriter.String() + if !strings.Contains(output, "plan file") { + t.Fatal("expected command output to refer to plan file, but got:", output) + } } -func TestApply_destroyTargeted(t *testing.T) { +func TestApply_destroyPath(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + p := applyFixtureProvider() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ApplyCommand{ + Destroy: true, + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-auto-approve", + testFixturePath("apply"), + } + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + output := ui.ErrorWriter.String() + if !strings.Contains(output, "-chdir") { + t.Fatal("expected command output to refer to -chdir flag, but got:", output) + } +} + +// Config with multiple resources with dependencies, targeting destroy of a +// root node, expecting all other resources to be destroyed due to +// dependencies. 
+func TestApply_destroyTargetedDependencies(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply-destroy-targeted"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + originalState := states.BuildState(func(s *states.SyncState) { s.SetResourceInstanceCurrent( addrs.Resource{ @@ -225,17 +416,21 @@ func TestApply_destroyTargeted(t *testing.T) { statePath := testStateFile(t, originalState) p := testProvider() - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, }, }, "test_load_balancer": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "instances": {Type: cty.List(cty.String), Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "instances": {Type: cty.List(cty.String), Optional: true}, + }, }, }, }, @@ -247,11 +442,13 @@ func TestApply_destroyTargeted(t *testing.T) { } ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Destroy: true, Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -260,7 +457,6 @@ func TestApply_destroyTargeted(t *testing.T) { "-auto-approve", "-target", "test_instance.foo", "-state", statePath, - testFixturePath("apply-destroy-targeted"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -309,6 +505,158 @@ func TestApply_destroyTargeted(t *testing.T) { } } +// Config with multiple resources with dependencies, targeting destroy of a +// leaf 
node, expecting the other resources to remain. +func TestApply_destroyTargeted(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply-destroy-targeted"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + originalState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"i-ab123"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_load_balancer", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"i-abc123"}`), + Dependencies: []addrs.ConfigResource{mustResourceAddr("test_instance.foo")}, + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + wantState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"i-ab123"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, originalState) + + p := testProvider() + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + "test_load_balancer": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "instances": {Type: cty.List(cty.String), Optional: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ApplyCommand{ + Destroy: true, + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + // Run the apply command pointing to our existing state + args := []string{ + "-auto-approve", + "-target", "test_load_balancer.foo", + "-state", statePath, + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Verify a new state exists + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + f, err := os.Open(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + defer f.Close() + + stateFile, err := statefile.Read(f) + if err != nil { + t.Fatalf("err: %s", err) + } + if stateFile == nil || stateFile.State == nil { + t.Fatal("state should not be nil") + } + + actualStr := strings.TrimSpace(stateFile.State.String()) + expectedStr := strings.TrimSpace(wantState.String()) + if actualStr != expectedStr { + t.Fatalf("bad:\n\nactual:\n%s\n\nexpected:\nb%s", actualStr, expectedStr) + } + + // Should have a backup file + f, err = os.Open(statePath + DefaultBackupExtension) + if err != nil { + t.Fatalf("err: %s", err) + } + + backupStateFile, err := statefile.Read(f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + backupActualStr := strings.TrimSpace(backupStateFile.State.String()) + backupExpectedStr := strings.TrimSpace(originalState.String()) + if 
backupActualStr != backupExpectedStr { + t.Fatalf("bad:\n\nactual:\n%s\n\nexpected:\nb%s", backupActualStr, backupExpectedStr) + } +} + const testApplyDestroyStr = ` ` diff --git a/command/apply_test.go b/command/apply_test.go index 280568b4f..2980bd61d 100644 --- a/command/apply_test.go +++ b/command/apply_test.go @@ -2,11 +2,9 @@ package command import ( "bytes" + "context" "fmt" "io/ioutil" - "net" - "net/http" - "net/url" "os" "path/filepath" "strings" @@ -29,23 +27,151 @@ import ( ) func TestApply(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + statePath := testTempFile(t) p := applyFixtureProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } args := []string{ "-state", statePath, "-auto-approve", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + state := testStateRead(t, statePath) + if state == nil { + t.Fatal("state should not be nil") + } +} + +func TestApply_path(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + p := applyFixtureProvider() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-auto-approve", testFixturePath("apply"), } + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + output := ui.ErrorWriter.String() + if !strings.Contains(output, "-chdir") { + t.Fatal("expected command output to refer to -chdir flag, but got:", output) 
+ } +} + +func TestApply_approveNo(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + // Disable test mode so input would be asked + test = false + defer func() { test = true }() + + // Answer approval request with "no" + defaultInputReader = bytes.NewBufferString("no\n") + defaultInputWriter = new(bytes.Buffer) + + p := applyFixtureProvider() + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + } + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + if got, want := ui.OutputWriter.String(), "Apply cancelled"; !strings.Contains(got, want) { + t.Fatalf("expected output to include %q, but was:\n%s", want, got) + } + + if _, err := os.Stat(statePath); err == nil || !os.IsNotExist(err) { + t.Fatalf("state file should not exist") + } +} + +func TestApply_approveYes(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + statePath := testTempFile(t) + + p := applyFixtureProvider() + + // Disable test mode so input would be asked + test = false + defer func() { test = true }() + + // Answer approval request with "yes" + defaultInputReader = bytes.NewBufferString("yes\n") + defaultInputWriter = new(bytes.Buffer) + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-state", statePath, + } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } @@ -62,6 +188,12 @@ func TestApply(t 
*testing.T) { // test apply with locked state func TestApply_lockedState(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + statePath := testTempFile(t) unlock, err := testLockState(testDataDir, statePath) @@ -72,17 +204,18 @@ func TestApply_lockedState(t *testing.T) { p := applyFixtureProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } args := []string{ "-state", statePath, "-auto-approve", - testFixturePath("apply"), } if code := c.Run(args); code == 0 { t.Fatal("expected error") @@ -96,6 +229,12 @@ func TestApply_lockedState(t *testing.T) { // test apply with locked state, waiting for unlock func TestApply_lockedStateWait(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + statePath := testTempFile(t) unlock, err := testLockState(testDataDir, statePath) @@ -111,10 +250,12 @@ func TestApply_lockedStateWait(t *testing.T) { p := applyFixtureProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -124,53 +265,34 @@ func TestApply_lockedStateWait(t *testing.T) { "-state", statePath, "-lock-timeout", "4s", "-auto-approve", - testFixturePath("apply"), } if code := c.Run(args); code != 0 { t.Fatalf("lock should have succeeded in less than 3s: %s", ui.ErrorWriter) } } -// high water mark counter -type hwm struct { - sync.Mutex - val int - max int -} - -func (t *hwm) Inc() { - t.Lock() - defer t.Unlock() - t.val++ - if t.val > t.max { - t.max = t.val - } -} - -func (t *hwm) Dec() { - t.Lock() - defer t.Unlock() - t.val-- -} - -func (t *hwm) Max() int { - t.Lock() - defer 
t.Unlock() - return t.max -} - +// Verify that the parallelism flag allows no more than the desired number of +// concurrent calls to ApplyResourceChange. func TestApply_parallelism(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("parallelism"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + statePath := testTempFile(t) par := 4 - // This blocks all the apply functions. We close it when we exit so - // they end quickly after this test finishes. - block := make(chan struct{}) - // signal how many goroutines have started - started := make(chan int, 100) + // started is a semaphore that we use to ensure that we never have more + // than "par" apply operations happening concurrently + started := make(chan struct{}, par) - runCount := &hwm{} + // beginCtx is used as a starting gate to hold back ApplyResourceChange + // calls until we reach the desired concurrency. The cancel func "begin" is + // called once we reach the desired concurrency, allowing all apply calls + // to proceed in unison. 
+ beginCtx, begin := context.WithCancel(context.Background()) // Since our mock provider has its own mutex preventing concurrent calls // to ApplyResourceChange, we need to use a number of separate providers @@ -180,9 +302,9 @@ func TestApply_parallelism(t *testing.T) { for i := 0; i < 10; i++ { name := fmt.Sprintf("test%d", i) provider := &terraform.MockProvider{} - provider.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - name + "_instance": {}, + provider.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + name + "_instance": {Block: &configschema.Block{}}, }, } provider.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { @@ -191,12 +313,29 @@ func TestApply_parallelism(t *testing.T) { } } provider.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { - // Increment so we're counting parallelism - started <- 1 - runCount.Inc() - defer runCount.Dec() - // Block here to stage up our max number of parallel instances - <-block + + // If we ever have more than our intended parallelism number of + // apply operations running concurrently, the semaphore will fail. + select { + case started <- struct{}{}: + defer func() { + <-started + }() + default: + t.Fatal("too many concurrent apply operations") + } + + // If we never reach our intended parallelism, the context will + // never be canceled and the test will time out. + if len(started) >= par { + begin() + } + <-beginCtx.Done() + + // do some "work" + // Not required for correctness, but makes it easier to spot a + // failure when there is more overlap. 
+ time.Sleep(10 * time.Millisecond) return providers.ApplyResourceChangeResponse{ NewState: cty.EmptyObjectVal, @@ -209,10 +348,12 @@ func TestApply_parallelism(t *testing.T) { } ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: testingOverrides, Ui: ui, + View: view, }, } @@ -220,60 +361,35 @@ func TestApply_parallelism(t *testing.T) { "-state", statePath, "-auto-approve", fmt.Sprintf("-parallelism=%d", par), - testFixturePath("parallelism"), } - // Run in a goroutine. We can get any errors from the ui.OutputWriter - doneCh := make(chan int, 1) - go func() { - doneCh <- c.Run(args) - }() - - timeout := time.After(5 * time.Second) - - // ensure things are running - for i := 0; i < par; i++ { - select { - case <-timeout: - t.Fatal("timeout waiting for all goroutines to start") - case <-started: - } - } - - // a little extra sleep, since we can't ensure all goroutines from the walk have - // really started - time.Sleep(100 * time.Millisecond) - close(block) - - select { - case res := <-doneCh: - if res != 0 { - t.Fatal(ui.OutputWriter.String()) - } - case <-timeout: - t.Fatal("timeout waiting from Run()") - } - - // The total in flight should equal the parallelism - if runCount.Max() != par { - t.Fatalf("Expected parallelism: %d, got: %d", par, runCount.Max()) + res := c.Run(args) + if res != 0 { + t.Fatal(ui.OutputWriter.String()) } } func TestApply_configInvalid(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply-config-invalid"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } args := []string{ "-state", testTempFile(t), "-auto-approve", - testFixturePath("apply-config-invalid"), } if code := c.Run(args); code != 1 { t.Fatalf("bad: \n%s", 
ui.OutputWriter.String()) @@ -281,7 +397,12 @@ func TestApply_configInvalid(t *testing.T) { } func TestApply_defaultState(t *testing.T) { - td := testTempDir(t) + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + statePath := filepath.Join(td, DefaultStateFilename) // Change to the temporary directory @@ -296,10 +417,12 @@ func TestApply_defaultState(t *testing.T) { p := applyFixtureProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -311,7 +434,6 @@ func TestApply_defaultState(t *testing.T) { args := []string{ "-auto-approve", - testFixturePath("apply"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -328,14 +450,22 @@ func TestApply_defaultState(t *testing.T) { } func TestApply_error(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply-error"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + statePath := testTempFile(t) p := testProvider() ui := cli.NewMockUi() + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -362,13 +492,15 @@ func TestApply_error(t *testing.T) { resp.PlannedState = cty.ObjectVal(s) return } - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - "error": {Type: cty.Bool, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": 
{Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + "error": {Type: cty.Bool, Optional: true}, + }, }, }, }, @@ -377,7 +509,6 @@ func TestApply_error(t *testing.T) { args := []string{ "-state", statePath, "-auto-approve", - testFixturePath("apply-error"), } if ui.ErrorWriter != nil { t.Logf("stdout:\n%s", ui.OutputWriter.String()) @@ -401,6 +532,12 @@ func TestApply_error(t *testing.T) { } func TestApply_input(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply-input"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + // Disable test mode so input would be asked test = false defer func() { test = true }() @@ -417,17 +554,18 @@ func TestApply_input(t *testing.T) { p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } args := []string{ "-state", statePath, "-auto-approve", - testFixturePath("apply-input"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -445,6 +583,12 @@ result = foo // When only a partial set of the variables are set, Terraform // should still ask for the unset ones by default (with -input=true) func TestApply_inputPartial(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply-input-partial"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + // Disable test mode so input would be asked test = false defer func() { test = true }() @@ -457,10 +601,12 @@ func TestApply_inputPartial(t *testing.T) { p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -468,7 +614,6 @@ func TestApply_inputPartial(t *testing.T) { "-state", statePath, "-auto-approve", 
"-var", "foo=foovalue", - testFixturePath("apply-input-partial"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -485,23 +630,22 @@ foo = foovalue } func TestApply_noArgs(t *testing.T) { - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("err: %s", err) - } - if err := os.Chdir(testFixturePath("apply")); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Chdir(cwd) + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() statePath := testTempFile(t) p := applyFixtureProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -518,9 +662,6 @@ func TestApply_noArgs(t *testing.T) { } state := testStateRead(t, statePath) - if err != nil { - t.Fatalf("err: %s", err) - } if state == nil { t.Fatal("state should not be nil") } @@ -540,10 +681,12 @@ func TestApply_plan(t *testing.T) { p := applyFixtureProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -572,10 +715,12 @@ func TestApply_plan_backup(t *testing.T) { p := applyFixtureProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -604,10 +749,12 @@ func TestApply_plan_noBackup(t *testing.T) { p := applyFixtureProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -682,10 +829,12 @@ func TestApply_plan_remoteState(t *testing.T) { p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -729,10 
+878,12 @@ func TestApply_planWithVarFile(t *testing.T) { p := applyFixtureProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -760,10 +911,12 @@ func TestApply_planVars(t *testing.T) { p := applyFixtureProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -798,12 +951,15 @@ func TestApply_planNoModuleFiles(t *testing.T) { planPath, } apply.Run(args) - if p.PrepareProviderConfigCalled { - t.Fatal("Prepare provider config should not be called with a plan") - } } func TestApply_refresh(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + originalState := states.BuildState(func(s *states.SyncState) { s.SetResourceInstanceCurrent( addrs.Resource{ @@ -825,17 +981,18 @@ func TestApply_refresh(t *testing.T) { p := applyFixtureProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } args := []string{ "-state", statePath, "-auto-approve", - testFixturePath("apply"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -865,6 +1022,12 @@ func TestApply_refresh(t *testing.T) { } func TestApply_shutdown(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply-shutdown"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + cancelled := make(chan struct{}) shutdownCh := make(chan struct{}) @@ -872,10 +1035,12 @@ func TestApply_shutdown(t *testing.T) { p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: 
metaOverridesForProvider(p), Ui: ui, + View: view, ShutdownCh: shutdownCh, }, } @@ -909,11 +1074,13 @@ func TestApply_shutdown(t *testing.T) { return } - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "ami": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -922,9 +1089,8 @@ func TestApply_shutdown(t *testing.T) { args := []string{ "-state", statePath, "-auto-approve", - testFixturePath("apply-shutdown"), } - if code := c.Run(args); code != 0 { + if code := c.Run(args); code != 1 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } @@ -945,6 +1111,12 @@ func TestApply_shutdown(t *testing.T) { } func TestApply_state(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + originalState := states.BuildState(func(s *states.SyncState) { s.SetResourceInstanceCurrent( addrs.Resource{ @@ -965,22 +1137,24 @@ func TestApply_state(t *testing.T) { statePath := testStateFile(t, originalState) p := applyFixtureProvider() - p.PlanResourceChangeResponse = providers.PlanResourceChangeResponse{ + p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ PlannedState: cty.ObjectVal(map[string]cty.Value{ "ami": cty.StringVal("bar"), }), } - p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{ + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{ NewState: cty.ObjectVal(map[string]cty.Value{ "ami": cty.StringVal("bar"), }), } ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: 
ui, + View: view, }, } @@ -988,7 +1162,6 @@ func TestApply_state(t *testing.T) { args := []string{ "-state", statePath, "-auto-approve", - testFixturePath("apply"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -1033,18 +1206,25 @@ func TestApply_state(t *testing.T) { } func TestApply_stateNoExist(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + p := applyFixtureProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } args := []string{ "idontexist.tfstate", - testFixturePath("apply"), } if code := c.Run(args); code != 1 { t.Fatalf("bad: \n%s", ui.OutputWriter.String()) @@ -1052,12 +1232,20 @@ func TestApply_stateNoExist(t *testing.T) { } func TestApply_sensitiveOutput(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply-sensitive-output"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + p := testProvider() ui := new(cli.MockUi) + view, done := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -1066,14 +1254,13 @@ func TestApply_sensitiveOutput(t *testing.T) { args := []string{ "-state", statePath, "-auto-approve", - testFixturePath("apply-sensitive-output"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: \n%s", ui.OutputWriter.String()) } - output := ui.OutputWriter.String() + output := done(t).Stdout() if !strings.Contains(output, "notsensitive = \"Hello world\"") { t.Fatalf("bad: output should contain 'notsensitive' output\n%s", output) } @@ -1083,23 +1270,33 @@ func TestApply_sensitiveOutput(t *testing.T) { } func TestApply_vars(t *testing.T) { + // Create a temporary working directory that is empty + td 
:= tempDir(t) + testCopyDir(t, testFixturePath("apply-vars"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + statePath := testTempFile(t) p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } actual := "" - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -1120,7 +1317,6 @@ func TestApply_vars(t *testing.T) { "-auto-approve", "-var", "foo=bar", "-state", statePath, - testFixturePath("apply-vars"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -1132,6 +1328,12 @@ func TestApply_vars(t *testing.T) { } func TestApply_varFile(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply-vars"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + varFilePath := testTempFile(t) if err := ioutil.WriteFile(varFilePath, []byte(applyVarFile), 0644); err != nil { t.Fatalf("err: %s", err) @@ -1141,19 +1343,23 @@ func TestApply_varFile(t *testing.T) { p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } actual := "" - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, 
Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -1174,7 +1380,6 @@ func TestApply_varFile(t *testing.T) { "-auto-approve", "-var-file", varFilePath, "-state", statePath, - testFixturePath("apply-vars"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -1186,38 +1391,38 @@ func TestApply_varFile(t *testing.T) { } func TestApply_varFileDefault(t *testing.T) { - varFileDir := testTempDir(t) - varFilePath := filepath.Join(varFileDir, "terraform.tfvars") + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply-vars"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + varFilePath := filepath.Join(td, "terraform.tfvars") if err := ioutil.WriteFile(varFilePath, []byte(applyVarFile), 0644); err != nil { t.Fatalf("err: %s", err) } statePath := testTempFile(t) - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("err: %s", err) - } - if err := os.Chdir(varFileDir); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Chdir(cwd) - p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } actual := "" - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -1237,7 +1442,6 @@ func TestApply_varFileDefault(t *testing.T) { args := []string{ "-auto-approve", "-state", statePath, - testFixturePath("apply-vars"), } if code := c.Run(args); code != 0 
{ t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -1249,38 +1453,38 @@ func TestApply_varFileDefault(t *testing.T) { } func TestApply_varFileDefaultJSON(t *testing.T) { - varFileDir := testTempDir(t) - varFilePath := filepath.Join(varFileDir, "terraform.tfvars.json") + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply-vars"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + varFilePath := filepath.Join(td, "terraform.tfvars.json") if err := ioutil.WriteFile(varFilePath, []byte(applyVarFileJSON), 0644); err != nil { t.Fatalf("err: %s", err) } statePath := testTempFile(t) - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("err: %s", err) - } - if err := os.Chdir(varFileDir); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Chdir(cwd) - p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } actual := "" - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -1300,7 +1504,6 @@ func TestApply_varFileDefaultJSON(t *testing.T) { args := []string{ "-auto-approve", "-state", statePath, - testFixturePath("apply-vars"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -1312,6 +1515,12 @@ func TestApply_varFileDefaultJSON(t *testing.T) { } func TestApply_backup(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + 
defer testChdir(t, td)() + originalState := states.BuildState(func(s *states.SyncState) { s.SetResourceInstanceCurrent( addrs.Resource{ @@ -1333,17 +1542,19 @@ func TestApply_backup(t *testing.T) { backupPath := testTempFile(t) p := applyFixtureProvider() - p.PlanResourceChangeResponse = providers.PlanResourceChangeResponse{ + p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ PlannedState: cty.ObjectVal(map[string]cty.Value{ "ami": cty.StringVal("bar"), }), } ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -1352,7 +1563,6 @@ func TestApply_backup(t *testing.T) { "-auto-approve", "-state", statePath, "-backup", backupPath, - testFixturePath("apply"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -1383,21 +1593,29 @@ func TestApply_backup(t *testing.T) { } func TestApply_disableBackup(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + originalState := testState() statePath := testStateFile(t, originalState) p := applyFixtureProvider() - p.PlanResourceChangeResponse = providers.PlanResourceChangeResponse{ + p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ PlannedState: cty.ObjectVal(map[string]cty.Value{ "ami": cty.StringVal("bar"), }), } ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } @@ -1406,7 +1624,6 @@ func TestApply_disableBackup(t *testing.T) { "-auto-approve", "-state", statePath, "-backup", "-", - testFixturePath("apply"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -1456,21 +1673,28 @@ func TestApply_disableBackup(t *testing.T) { // Test that the Terraform env is 
passed through func TestApply_terraformEnv(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply-terraform-env"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + statePath := testTempFile(t) p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } args := []string{ "-auto-approve", "-state", statePath, - testFixturePath("apply-terraform-env"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -1489,15 +1713,16 @@ output = default func TestApply_terraformEnvNonDefault(t *testing.T) { // Create a temporary working directory that is empty td := tempDir(t) - os.MkdirAll(td, 0755) + testCopyDir(t, testFixturePath("apply-terraform-env"), td) defer os.RemoveAll(td) defer testChdir(t, td)() // Create new env { ui := new(cli.MockUi) + view, _ := testView(t) newCmd := &WorkspaceNewCommand{} - newCmd.Meta = Meta{Ui: ui} + newCmd.Meta = Meta{Ui: ui, View: view} if code := newCmd.Run([]string{"test"}); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter) } @@ -1507,8 +1732,9 @@ func TestApply_terraformEnvNonDefault(t *testing.T) { { args := []string{"test"} ui := new(cli.MockUi) + view, _ := testView(t) selCmd := &WorkspaceSelectCommand{} - selCmd.Meta = Meta{Ui: ui} + selCmd.Meta = Meta{Ui: ui, View: view} if code := selCmd.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter) } @@ -1516,16 +1742,17 @@ func TestApply_terraformEnvNonDefault(t *testing.T) { p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &ApplyCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } args := []string{ "-auto-approve", - testFixturePath("apply-terraform-env"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -1541,41 
+1768,108 @@ output = test testStateOutput(t, statePath, expected) } -func testHttpServer(t *testing.T) net.Listener { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("err: %s", err) +// Config with multiple resources, targeting apply of a subset +func TestApply_targeted(t *testing.T) { + td := tempDir(t) + testCopyDir(t, testFixturePath("apply-targeted"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + p := testProvider() + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } } - mux := http.NewServeMux() - mux.HandleFunc("/header", testHttpHandlerHeader) + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } - var server http.Server - server.Handler = mux - go server.Serve(ln) + args := []string{ + "-auto-approve", + "-target", "test_instance.foo", + "-target", "test_instance.baz", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } - return ln + if got, want := ui.OutputWriter.String(), "3 added, 0 changed, 0 destroyed"; !strings.Contains(got, want) { + t.Fatalf("bad change summary, want %q, got:\n%s", want, got) + } } -func testHttpHandlerHeader(w http.ResponseWriter, r *http.Request) { - var url url.URL - url.Scheme = "file" - url.Path = filepath.ToSlash(testFixturePath("init")) +// Diagnostics for invalid -target flags +func TestApply_targetFlagsDiags(t *testing.T) { + testCases := map[string]string{ + "test_instance.": "Dot must be followed by 
attribute name.", + "test_instance": "Resource specification must include a resource type and name.", + } - w.Header().Add("X-Terraform-Get", url.String()) - w.WriteHeader(200) + for target, wantDiag := range testCases { + t.Run(target, func(t *testing.T) { + td := testTempDir(t) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &ApplyCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-auto-approve", + "-target", target, + } + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + got := ui.ErrorWriter.String() + if !strings.Contains(got, target) { + t.Fatalf("bad error output, want %q, got:\n%s", target, got) + } + if !strings.Contains(got, wantDiag) { + t.Fatalf("bad error output, want %q, got:\n%s", wantDiag, got) + } + }) + } } // applyFixtureSchema returns a schema suitable for processing the // configuration in testdata/apply . This schema should be // assigned to a mock provider named "test". -func applyFixtureSchema() *terraform.ProviderSchema { - return &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ +func applyFixtureSchema() *providers.GetSchemaResponse { + return &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -1584,12 +1878,12 @@ func applyFixtureSchema() *terraform.ProviderSchema { // applyFixtureProvider returns a mock provider that is configured for basic // operation with the configuration in testdata/apply. 
This mock has -// GetSchemaReturn, PlanResourceChangeFn, and ApplyResourceChangeFn populated, +// GetSchemaResponse, PlanResourceChangeFn, and ApplyResourceChangeFn populated, // with the plan/apply steps just passing through the data determined by // Terraform Core. func applyFixtureProvider() *terraform.MockProvider { p := testProvider() - p.GetSchemaReturn = applyFixtureSchema() + p.GetSchemaResponse = applyFixtureSchema() p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { return providers.PlanResourceChangeResponse{ PlannedState: req.ProposedNewState, @@ -1652,23 +1946,3 @@ foo = "bar" const applyVarFileJSON = ` { "foo": "bar" } ` - -const testApplyDisableBackupStr = ` -ID = bar -Tainted = false -` - -const testApplyDisableBackupStateStr = ` -ID = bar -Tainted = false -` - -const testApplyStateStr = ` -ID = bar -Tainted = false -` - -const testApplyStateDiffStr = ` -ID = bar -Tainted = false -` diff --git a/command/arguments/default.go b/command/arguments/default.go new file mode 100644 index 000000000..4b7bb4024 --- /dev/null +++ b/command/arguments/default.go @@ -0,0 +1,16 @@ +package arguments + +import ( + "flag" + "io/ioutil" +) + +// defaultFlagSet creates a FlagSet with the common settings to override +// the flag package's noisy defaults. +func defaultFlagSet(name string) *flag.FlagSet { + f := flag.NewFlagSet(name, flag.ContinueOnError) + f.SetOutput(ioutil.Discard) + f.Usage = func() {} + + return f +} diff --git a/command/arguments/output.go b/command/arguments/output.go new file mode 100644 index 000000000..f77c283cc --- /dev/null +++ b/command/arguments/output.go @@ -0,0 +1,88 @@ +package arguments + +import ( + "github.com/hashicorp/terraform/tfdiags" +) + +// Output represents the command-line arguments for the output command. +type Output struct { + // Name identifies which root module output to show. If empty, show all + // outputs. 
+ Name string + + // StatePath is an optional path to a state file, from which outputs will + // be loaded. + StatePath string + + // ViewType specifies which output format to use: human, JSON, or "raw". + ViewType ViewType +} + +// ParseOutput processes CLI arguments, returning an Output value and errors. +// If errors are encountered, an Output value is still returned representing +// the best effort interpretation of the arguments. +func ParseOutput(args []string) (*Output, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + output := &Output{} + + var jsonOutput, rawOutput bool + var statePath string + cmdFlags := defaultFlagSet("output") + cmdFlags.BoolVar(&jsonOutput, "json", false, "json") + cmdFlags.BoolVar(&rawOutput, "raw", false, "raw") + cmdFlags.StringVar(&statePath, "state", "", "path") + + if err := cmdFlags.Parse(args); err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + err.Error(), + )) + } + + args = cmdFlags.Args() + if len(args) > 1 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unexpected argument", + "The output command expects exactly one argument with the name of an output variable or no arguments to show all outputs.", + )) + } + + if jsonOutput && rawOutput { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid output format", + "The -raw and -json options are mutually-exclusive.", + )) + + // Since the desired output format is unknowable, fall back to default + jsonOutput = false + rawOutput = false + } + + output.StatePath = statePath + + if len(args) > 0 { + output.Name = args[0] + } + + if rawOutput && output.Name == "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Output name required", + "You must give the name of a single output value when using the -raw option.", + )) + } + + switch { + case jsonOutput: + output.ViewType = ViewJSON + case rawOutput: + output.ViewType = ViewRaw + default: + output.ViewType = 
ViewHuman + } + + return output, diags +} diff --git a/command/arguments/output_test.go b/command/arguments/output_test.go new file mode 100644 index 000000000..304a156bf --- /dev/null +++ b/command/arguments/output_test.go @@ -0,0 +1,142 @@ +package arguments + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/terraform/tfdiags" +) + +func TestParseOutput_valid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Output + }{ + "defaults": { + nil, + &Output{ + Name: "", + ViewType: ViewHuman, + StatePath: "", + }, + }, + "json": { + []string{"-json"}, + &Output{ + Name: "", + ViewType: ViewJSON, + StatePath: "", + }, + }, + "raw": { + []string{"-raw", "foo"}, + &Output{ + Name: "foo", + ViewType: ViewRaw, + StatePath: "", + }, + }, + "state": { + []string{"-state=foobar.tfstate", "-raw", "foo"}, + &Output{ + Name: "foo", + ViewType: ViewRaw, + StatePath: "foobar.tfstate", + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, diags := ParseOutput(tc.args) + if len(diags) > 0 { + t.Fatalf("unexpected diags: %v", diags) + } + if *got != *tc.want { + t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + }) + } +} + +func TestParseOutput_invalid(t *testing.T) { + testCases := map[string]struct { + args []string + want *Output + wantDiags tfdiags.Diagnostics + }{ + "unknown flag": { + []string{"-boop"}, + &Output{ + Name: "", + ViewType: ViewHuman, + StatePath: "", + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Failed to parse command-line flags", + "flag provided but not defined: -boop", + ), + }, + }, + "json and raw specified": { + []string{"-json", "-raw"}, + &Output{ + Name: "", + ViewType: ViewHuman, + StatePath: "", + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Invalid output format", + "The -raw and -json options are mutually-exclusive.", + ), + }, + }, + "raw with no name": { + 
[]string{"-raw"}, + &Output{ + Name: "", + ViewType: ViewRaw, + StatePath: "", + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Output name required", + "You must give the name of a single output value when using the -raw option.", + ), + }, + }, + "too many arguments": { + []string{"-raw", "-state=foo.tfstate", "bar", "baz"}, + &Output{ + Name: "bar", + ViewType: ViewRaw, + StatePath: "foo.tfstate", + }, + tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Error, + "Unexpected argument", + "The output command expects exactly one argument with the name of an output variable or no arguments to show all outputs.", + ), + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, gotDiags := ParseOutput(tc.args) + if *got != *tc.want { + t.Fatalf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + if !reflect.DeepEqual(gotDiags, tc.wantDiags) { + t.Errorf("wrong result\ngot: %s\nwant: %s", spew.Sdump(gotDiags), spew.Sdump(tc.wantDiags)) + } + }) + } +} diff --git a/command/arguments/types.go b/command/arguments/types.go new file mode 100644 index 000000000..0203a04eb --- /dev/null +++ b/command/arguments/types.go @@ -0,0 +1,13 @@ +package arguments + +// ViewType represents which view layer to use for a given command. Not all +// commands will support all view types, and validation that the type is +// supported should happen in the view constructor. +type ViewType rune + +const ( + ViewNone ViewType = 0 + ViewHuman ViewType = 'H' + ViewJSON ViewType = 'J' + ViewRaw ViewType = 'R' +) diff --git a/command/arguments/view.go b/command/arguments/view.go new file mode 100644 index 000000000..3d6372b6a --- /dev/null +++ b/command/arguments/view.go @@ -0,0 +1,43 @@ +package arguments + +// View represents the global command-line arguments which configure the view. +type View struct { + // NoColor is used to disable the use of terminal color codes in all + // output. 
+ NoColor bool + + // CompactWarnings is used to coalesce duplicate warnings, to reduce the + // level of noise when multiple instances of the same warning are raised + // for a configuration. + CompactWarnings bool +} + +// ParseView processes CLI arguments, returning a View value and a +// possibly-modified slice of arguments. If any of the supported flags are +// found, they will be removed from the slice. +func ParseView(args []string) (*View, []string) { + common := &View{} + + // Keep track of the length of the returned slice. When we find an + // argument we support, i will not be incremented. + i := 0 + for _, v := range args { + switch v { + case "-no-color": + common.NoColor = true + case "-compact-warnings": + common.CompactWarnings = true + default: + // Unsupported argument: move left to the current position, and + // increment the index. + args[i] = v + i++ + } + } + + // Reduce the slice to the number of unsupported arguments. Any remaining + // to the right of i have already been moved left. 
+ args = args[:i] + + return common, args +} diff --git a/command/arguments/view_test.go b/command/arguments/view_test.go new file mode 100644 index 000000000..d2e7c3f73 --- /dev/null +++ b/command/arguments/view_test.go @@ -0,0 +1,62 @@ +package arguments + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestParseView(t *testing.T) { + testCases := map[string]struct { + args []string + want *View + wantArgs []string + }{ + "nil": { + nil, + &View{NoColor: false, CompactWarnings: false}, + nil, + }, + "empty": { + []string{}, + &View{NoColor: false, CompactWarnings: false}, + []string{}, + }, + "none matching": { + []string{"-foo", "bar", "-baz"}, + &View{NoColor: false, CompactWarnings: false}, + []string{"-foo", "bar", "-baz"}, + }, + "no-color": { + []string{"-foo", "-no-color", "-baz"}, + &View{NoColor: true, CompactWarnings: false}, + []string{"-foo", "-baz"}, + }, + "compact-warnings": { + []string{"-foo", "-compact-warnings", "-baz"}, + &View{NoColor: false, CompactWarnings: true}, + []string{"-foo", "-baz"}, + }, + "both": { + []string{"-foo", "-no-color", "-compact-warnings", "-baz"}, + &View{NoColor: true, CompactWarnings: true}, + []string{"-foo", "-baz"}, + }, + "both, resulting in empty args": { + []string{"-no-color", "-compact-warnings"}, + &View{NoColor: true, CompactWarnings: true}, + []string{}, + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, gotArgs := ParseView(tc.args) + if *got != *tc.want { + t.Errorf("unexpected result\n got: %#v\nwant: %#v", got, tc.want) + } + if !cmp.Equal(gotArgs, tc.wantArgs) { + t.Errorf("unexpected args\n got: %#v\nwant: %#v", gotArgs, tc.wantArgs) + } + }) + } +} diff --git a/command/cliconfig/cliconfig.go b/command/cliconfig/cliconfig.go index 7ac6eeb0b..0896738c9 100644 --- a/command/cliconfig/cliconfig.go +++ b/command/cliconfig/cliconfig.go @@ -287,39 +287,39 @@ func (c *Config) Validate() tfdiags.Diagnostics { // Merge merges two configurations and 
returns a third entirely // new configuration with the two merged. -func (c1 *Config) Merge(c2 *Config) *Config { +func (c *Config) Merge(c2 *Config) *Config { var result Config result.Providers = make(map[string]string) result.Provisioners = make(map[string]string) - for k, v := range c1.Providers { + for k, v := range c.Providers { result.Providers[k] = v } for k, v := range c2.Providers { - if v1, ok := c1.Providers[k]; ok { + if v1, ok := c.Providers[k]; ok { log.Printf("[INFO] Local %s provider configuration '%s' overrides '%s'", k, v, v1) } result.Providers[k] = v } - for k, v := range c1.Provisioners { + for k, v := range c.Provisioners { result.Provisioners[k] = v } for k, v := range c2.Provisioners { - if v1, ok := c1.Provisioners[k]; ok { + if v1, ok := c.Provisioners[k]; ok { log.Printf("[INFO] Local %s provisioner configuration '%s' overrides '%s'", k, v, v1) } result.Provisioners[k] = v } - result.DisableCheckpoint = c1.DisableCheckpoint || c2.DisableCheckpoint - result.DisableCheckpointSignature = c1.DisableCheckpointSignature || c2.DisableCheckpointSignature + result.DisableCheckpoint = c.DisableCheckpoint || c2.DisableCheckpoint + result.DisableCheckpointSignature = c.DisableCheckpointSignature || c2.DisableCheckpointSignature - result.PluginCacheDir = c1.PluginCacheDir + result.PluginCacheDir = c.PluginCacheDir if result.PluginCacheDir == "" { result.PluginCacheDir = c2.PluginCacheDir } - if (len(c1.Hosts) + len(c2.Hosts)) > 0 { + if (len(c.Hosts) + len(c2.Hosts)) > 0 { result.Hosts = make(map[string]*ConfigHost) - for name, host := range c1.Hosts { + for name, host := range c.Hosts { result.Hosts[name] = host } for name, host := range c2.Hosts { @@ -327,9 +327,9 @@ func (c1 *Config) Merge(c2 *Config) *Config { } } - if (len(c1.Credentials) + len(c2.Credentials)) > 0 { + if (len(c.Credentials) + len(c2.Credentials)) > 0 { result.Credentials = make(map[string]map[string]interface{}) - for host, creds := range c1.Credentials { + for host, creds := 
range c.Credentials { result.Credentials[host] = creds } for host, creds := range c2.Credentials { @@ -340,9 +340,9 @@ func (c1 *Config) Merge(c2 *Config) *Config { } } - if (len(c1.CredentialsHelpers) + len(c2.CredentialsHelpers)) > 0 { + if (len(c.CredentialsHelpers) + len(c2.CredentialsHelpers)) > 0 { result.CredentialsHelpers = make(map[string]*ConfigCredentialsHelper) - for name, helper := range c1.CredentialsHelpers { + for name, helper := range c.CredentialsHelpers { result.CredentialsHelpers[name] = helper } for name, helper := range c2.CredentialsHelpers { @@ -350,8 +350,8 @@ func (c1 *Config) Merge(c2 *Config) *Config { } } - if (len(c1.ProviderInstallation) + len(c2.ProviderInstallation)) > 0 { - result.ProviderInstallation = append(result.ProviderInstallation, c1.ProviderInstallation...) + if (len(c.ProviderInstallation) + len(c2.ProviderInstallation)) > 0 { + result.ProviderInstallation = append(result.ProviderInstallation, c.ProviderInstallation...) result.ProviderInstallation = append(result.ProviderInstallation, c2.ProviderInstallation...) } diff --git a/command/cliconfig/config_unix.go b/command/cliconfig/config_unix.go index 5922c17ac..f1a9d5936 100644 --- a/command/cliconfig/config_unix.go +++ b/command/cliconfig/config_unix.go @@ -31,7 +31,7 @@ func homeDir() (string, error) { // First prefer the HOME environmental variable if home := os.Getenv("HOME"); home != "" { // FIXME: homeDir gets called from globalPluginDirs during init, before - // the logging is setup. We should move meta initializtion outside of + // the logging is set up. We should move meta initializtion outside of // init, but in the meantime we just need to silence this output. 
//log.Printf("[DEBUG] Detected home directory from env var: %s", home) diff --git a/command/clistate/local_state.go b/command/clistate/local_state.go index f5f9dbac7..42f921f14 100644 --- a/command/clistate/local_state.go +++ b/command/clistate/local_state.go @@ -12,8 +12,8 @@ import ( "time" multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/internal/legacy/terraform" "github.com/hashicorp/terraform/states/statemgr" - "github.com/hashicorp/terraform/terraform" ) // LocalState manages a state storage that is local to the filesystem. diff --git a/command/clistate/state.go b/command/clistate/state.go index 2620c62f2..c04f7d70b 100644 --- a/command/clistate/state.go +++ b/command/clistate/state.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/errwrap" multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/slowmessage" + "github.com/hashicorp/terraform/internal/helper/slowmessage" "github.com/hashicorp/terraform/states/statemgr" "github.com/mitchellh/cli" "github.com/mitchellh/colorstring" diff --git a/command/clistate/state_test.go b/command/clistate/state_test.go index f1ba88ab8..7162538f1 100644 --- a/command/clistate/state_test.go +++ b/command/clistate/state_test.go @@ -2,7 +2,6 @@ package clistate import ( "context" - "fmt" "testing" "github.com/hashicorp/terraform/states/statemgr" @@ -18,7 +17,7 @@ func TestUnlock(t *testing.T) { err := l.Unlock(nil) if err != nil { - fmt.Printf(err.Error()) + t.Log(err.Error()) } else { t.Error("expected error") } diff --git a/command/command.go b/command/command.go index 815a6fa6d..41748d652 100644 --- a/command/command.go +++ b/command/command.go @@ -2,11 +2,8 @@ package command import ( "fmt" - "log" "os" "runtime" - - "github.com/hashicorp/terraform/terraform" ) // Set to true when we're testing @@ -50,45 +47,25 @@ is configured to use a non-local backend. This backend doesn't support this operation. 
` -// ModulePath returns the path to the root module from the CLI args. +// ModulePath returns the path to the root module and validates CLI arguments. // -// This centralizes the logic for any commands that expect a module path -// on their CLI args. This will verify that only one argument is given -// and that it is a path to configuration. +// This centralizes the logic for any commands that previously accepted +// a module path via CLI arguments. This will error if any extraneous arguments +// are given and suggest using the -chdir flag instead. // // If your command accepts more than one arg, then change the slice bounds // to pass validation. func ModulePath(args []string) (string, error) { // TODO: test - if len(args) > 1 { - return "", fmt.Errorf("Too many command line arguments. Configuration path expected.") + if len(args) > 0 { + return "", fmt.Errorf("Too many command line arguments. Did you mean to use -chdir?") } - if len(args) == 0 { - path, err := os.Getwd() - if err != nil { - return "", fmt.Errorf("Error getting pwd: %s", err) - } - - return path, nil + path, err := os.Getwd() + if err != nil { + return "", fmt.Errorf("Error getting pwd: %s", err) } - return args[0], nil -} - -func (m *Meta) validateContext(ctx *terraform.Context) bool { - log.Println("[INFO] Validating the context...") - diags := ctx.Validate() - log.Printf("[INFO] Validation result: %d diagnostics", len(diags)) - - if len(diags) > 0 { - m.Ui.Output( - "There are warnings and/or errors related to your configuration. 
Please\n" + - "fix these before continuing.\n") - - m.showDiagnostics(diags) - } - - return !diags.HasErrors() + return path, nil } diff --git a/command/command_test.go b/command/command_test.go index e28ee6e1f..4454a8404 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -19,8 +19,10 @@ import ( svchost "github.com/hashicorp/terraform-svchost" "github.com/hashicorp/terraform-svchost/disco" + "github.com/hashicorp/terraform/command/views" "github.com/hashicorp/terraform/internal/getproviders" "github.com/hashicorp/terraform/internal/initwd" + "github.com/hashicorp/terraform/internal/terminal" "github.com/hashicorp/terraform/registry" "github.com/hashicorp/terraform/addrs" @@ -31,7 +33,6 @@ import ( "github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/plans/planfile" "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/states/statefile" "github.com/hashicorp/terraform/states/statemgr" @@ -41,6 +42,7 @@ import ( backendInit "github.com/hashicorp/terraform/backend/init" backendLocal "github.com/hashicorp/terraform/backend/local" + legacy "github.com/hashicorp/terraform/internal/legacy/terraform" _ "github.com/hashicorp/terraform/internal/logging" ) @@ -119,23 +121,6 @@ func metaOverridesForProvider(p providers.Interface) *testingOverrides { } } -func metaOverridesForProviderAndProvisioner(p providers.Interface, pr provisioners.Interface) *testingOverrides { - return &testingOverrides{ - Providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider("test"): providers.FactoryFixed(p), - }, - Provisioners: map[string]provisioners.Factory{ - "shell": provisioners.FactoryFixed(pr), - }, - } -} - -func testModule(t *testing.T, name string) *configs.Config { - t.Helper() - c, _ := testModuleWithSnapshot(t, name) - return c -} - func testModuleWithSnapshot(t *testing.T, name string) (*configs.Config, 
*configload.Snapshot) { t.Helper() @@ -404,7 +389,7 @@ func testStateFileWorkspaceDefault(t *testing.T, workspace string, s *states.Sta // testStateFileRemote writes the state out to the remote statefile // in the cwd. Use `testCwd` to change into a temp cwd. -func testStateFileRemote(t *testing.T, s *terraform.State) string { +func testStateFileRemote(t *testing.T, s *legacy.State) string { t.Helper() path := filepath.Join(DefaultDataDir, DefaultStateFilename) @@ -418,7 +403,7 @@ func testStateFileRemote(t *testing.T, s *terraform.State) string { } defer f.Close() - if err := terraform.WriteState(s, f); err != nil { + if err := legacy.WriteState(s, f); err != nil { t.Fatalf("err: %s", err) } @@ -446,9 +431,9 @@ func testStateRead(t *testing.T, path string) *states.State { // testDataStateRead reads a "data state", which is a file format resembling // our state format v3 that is used only to track current backend settings. // -// This old format still uses *terraform.State, but should be replaced with +// This old format still uses *legacy.State, but should be replaced with // a more specialized type in a later release. -func testDataStateRead(t *testing.T, path string) *terraform.State { +func testDataStateRead(t *testing.T, path string) *legacy.State { t.Helper() f, err := os.Open(path) @@ -457,7 +442,7 @@ func testDataStateRead(t *testing.T, path string) *terraform.State { } defer f.Close() - s, err := terraform.ReadState(f) + s, err := legacy.ReadState(f) if err != nil { t.Fatalf("err: %s", err) } @@ -515,26 +500,6 @@ func testTempDir(t *testing.T) string { return d } -// testRename renames the path to new and returns a function to defer to -// revert the rename. 
-func testRename(t *testing.T, base, path, new string) func() { - t.Helper() - - if base != "" { - path = filepath.Join(base, path) - new = filepath.Join(base, new) - } - - if err := os.Rename(path, new); err != nil { - t.Fatalf("err: %s", err) - } - - return func() { - // Just re-rename and ignore the return value - testRename(t, "", new, path) - } -} - // testChdir changes the directory and returns a function to defer to // revert the old cwd. func testChdir(t *testing.T, new string) func() { @@ -667,7 +632,7 @@ func testInteractiveInput(t *testing.T, answers []string) func() { // Disable test mode so input is called test = false - // Setup reader/writers + // Set up reader/writers testInputResponse = answers defaultInputReader = bytes.NewBufferString("") defaultInputWriter = new(bytes.Buffer) @@ -688,7 +653,7 @@ func testInputMap(t *testing.T, answers map[string]string) func() { // Disable test mode so input is called test = false - // Setup reader/writers + // Set up reader/writers defaultInputReader = bytes.NewBufferString("") defaultInputWriter = new(bytes.Buffer) @@ -719,7 +684,7 @@ func testInputMap(t *testing.T, answers map[string]string) func() { // be returned about the backend configuration having changed and that // "terraform init" must be run, since the test backend config cache created // by this function contains the hash for an empty configuration. 
-func testBackendState(t *testing.T, s *states.State, c int) (*terraform.State, *httptest.Server) { +func testBackendState(t *testing.T, s *states.State, c int) (*legacy.State, *httptest.Server) { t.Helper() var b64md5 string @@ -759,8 +724,8 @@ func testBackendState(t *testing.T, s *states.State, c int) (*terraform.State, * configSchema := b.ConfigSchema() hash := backendConfig.Hash(configSchema) - state := terraform.NewState() - state.Backend = &terraform.BackendState{ + state := legacy.NewState() + state.Backend = &legacy.BackendState{ Type: "http", ConfigRaw: json.RawMessage(fmt.Sprintf(`{"address":%q}`, srv.URL)), Hash: uint64(hash), @@ -772,10 +737,10 @@ func testBackendState(t *testing.T, s *states.State, c int) (*terraform.State, * // testRemoteState is used to make a test HTTP server to return a given // state file that can be used for testing legacy remote state. // -// The return values are a *terraform.State instance that should be written +// The return values are a *legacy.State instance that should be written // as the "data state" (really: backend state) and the server that the // returned data state refers to. 
-func testRemoteState(t *testing.T, s *states.State, c int) (*terraform.State, *httptest.Server) { +func testRemoteState(t *testing.T, s *states.State, c int) (*legacy.State, *httptest.Server) { t.Helper() var b64md5 string @@ -795,10 +760,10 @@ func testRemoteState(t *testing.T, s *states.State, c int) (*terraform.State, *h resp.Write(buf.Bytes()) } - retState := terraform.NewState() + retState := legacy.NewState() srv := httptest.NewServer(http.HandlerFunc(cb)) - b := &terraform.BackendState{ + b := &legacy.BackendState{ Type: "http", } b.SetConfig(cty.ObjectVal(map[string]cty.Value{ @@ -944,8 +909,6 @@ func testCopyDir(t *testing.T, src, dst string) { } } } - - return } // normalizeJSON removes all insignificant whitespace from the given JSON buffer @@ -1067,3 +1030,8 @@ func fakeRegistryHandler(resp http.ResponseWriter, req *http.Request) { resp.Write([]byte(`provider not found`)) } } + +func testView(t *testing.T) (*views.View, func(*testing.T) *terminal.TestOutput) { + streams, done := terminal.StreamsForTesting(t) + return views.NewView(streams), done +} diff --git a/command/console.go b/command/console.go index c6f872b90..75d8edc64 100644 --- a/command/console.go +++ b/command/console.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/helper/wrappedstreams" + "github.com/hashicorp/terraform/internal/helper/wrappedstreams" "github.com/hashicorp/terraform/repl" "github.com/hashicorp/terraform/tfdiags" @@ -35,6 +35,7 @@ func (c *ConsoleCommand) Run(args []string) int { c.Ui.Error(err.Error()) return 1 } + configPath = c.Meta.normalizePath(configPath) // Check for user-supplied plugin path if c.pluginPath, err = c.loadPluginPath(); err != nil { @@ -69,6 +70,9 @@ func (c *ConsoleCommand) Run(args []string) int { return 1 } + // This is a read-only command + c.ignoreRemoteBackendVersionConflict(b) + // Build the operation opReq := c.Operation(b) opReq.ConfigDir = configPath 
@@ -106,7 +110,7 @@ func (c *ConsoleCommand) Run(args []string) int { } }() - // Setup the UI so we can output directly to stdout + // Set up the UI so we can output directly to stdout ui := &cli.BasicUi{ Writer: wrappedstreams.Stdout(), ErrorWriter: wrappedstreams.Stderr(), @@ -171,7 +175,7 @@ func (c *ConsoleCommand) modePiped(session *repl.Session, ui cli.Ui) int { func (c *ConsoleCommand) Help() string { helpText := ` -Usage: terraform console [options] [DIR] +Usage: terraform console [options] Starts an interactive console for experimenting with Terraform interpolations. @@ -183,9 +187,6 @@ Usage: terraform console [options] [DIR] This command will never modify your state. - DIR can be set to a directory with a Terraform state to load. By - default, this will default to the current working directory. - Options: -state=path Path to read state. Defaults to "terraform.tfstate" diff --git a/command/console_interactive.go b/command/console_interactive.go index f8261bb57..92f3b4efd 100644 --- a/command/console_interactive.go +++ b/command/console_interactive.go @@ -9,7 +9,7 @@ import ( "fmt" "io" - "github.com/hashicorp/terraform/helper/wrappedreadline" + "github.com/hashicorp/terraform/internal/helper/wrappedreadline" "github.com/hashicorp/terraform/repl" "github.com/chzyer/readline" diff --git a/command/console_test.go b/command/console_test.go index 921e664db..8fb63ea7e 100644 --- a/command/console_test.go +++ b/command/console_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/providers" "github.com/mitchellh/cli" "github.com/zclconf/go-cty/cty" ) @@ -52,26 +52,29 @@ func TestConsole_basic(t *testing.T) { } func TestConsole_tfvars(t *testing.T) { - tmp, cwd := testCwd(t) - defer testFixCwd(t, tmp, cwd) + td := tempDir(t) + testCopyDir(t, testFixturePath("apply-vars"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() // Write a 
terraform.tvars - varFilePath := filepath.Join(tmp, "terraform.tfvars") + varFilePath := filepath.Join(td, "terraform.tfvars") if err := ioutil.WriteFile(varFilePath, []byte(applyVarFile), 0644); err != nil { t.Fatalf("err: %s", err) } p := testProvider() - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, }, }, }, } - ui := cli.NewMockUi() c := &ConsoleCommand{ Meta: Meta{ @@ -84,9 +87,7 @@ func TestConsole_tfvars(t *testing.T) { defer testStdinPipe(t, strings.NewReader("var.foo\n"))() outCloser := testStdoutCapture(t, &output) - args := []string{ - testFixturePath("apply-vars"), - } + args := []string{} code := c.Run(args) outCloser() if code != 0 { @@ -105,16 +106,22 @@ func TestConsole_unsetRequiredVars(t *testing.T) { // "terraform console" producing an interactive prompt for those variables // or producing errors. Instead, it should allow evaluation in that // partial context but see the unset variables values as being unknown. - - tmp, cwd := testCwd(t) - defer testFixCwd(t, tmp, cwd) + // + // This test fixture includes variable "foo" {}, which we are + // intentionally not setting here. 
+ td := tempDir(t) + testCopyDir(t, testFixturePath("apply-vars"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() p := testProvider() - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -131,11 +138,7 @@ func TestConsole_unsetRequiredVars(t *testing.T) { defer testStdinPipe(t, strings.NewReader("var.foo\n"))() outCloser := testStdoutCapture(t, &output) - args := []string{ - // This test fixture includes variable "foo" {}, which we are - // intentionally not setting here. - testFixturePath("apply-vars"), - } + args := []string{} code := c.Run(args) outCloser() @@ -149,8 +152,10 @@ func TestConsole_unsetRequiredVars(t *testing.T) { } func TestConsole_variables(t *testing.T) { - tmp, cwd := testCwd(t) - defer testFixCwd(t, tmp, cwd) + td := tempDir(t) + testCopyDir(t, testFixturePath("variables"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() p := testProvider() ui := cli.NewMockUi() @@ -168,9 +173,7 @@ func TestConsole_variables(t *testing.T) { "local.snack_bar\n": "[\n \"popcorn\",\n (sensitive),\n]\n", } - args := []string{ - testFixturePath("variables"), - } + args := []string{} for cmd, val := range commands { var output bytes.Buffer @@ -211,9 +214,7 @@ func TestConsole_modules(t *testing.T) { "local.foo\n": "3\n", } - args := []string{ - testFixturePath("modules"), - } + args := []string{} for cmd, val := range commands { var output bytes.Buffer diff --git a/command/e2etest/automation_test.go b/command/e2etest/automation_test.go index b7214bc0f..d65afd177 100644 --- a/command/e2etest/automation_test.go +++ b/command/e2etest/automation_test.go @@ 
-108,7 +108,7 @@ func TestPlanApplyInAutomation(t *testing.T) { stateResources := state.RootModule().Resources var gotResources []string - for n, _ := range stateResources { + for n := range stateResources { gotResources = append(gotResources, n) } sort.Strings(gotResources) diff --git a/command/e2etest/init_test.go b/command/e2etest/init_test.go index 0f5e19b35..a3b3a0b08 100644 --- a/command/e2etest/init_test.go +++ b/command/e2etest/init_test.go @@ -9,6 +9,8 @@ import ( "strings" "testing" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform/e2e" ) @@ -333,7 +335,7 @@ func TestInitProviderNotFound(t *testing.T) { defer tf.Close() t.Run("registry provider not found", func(t *testing.T) { - _, stderr, err := tf.Run("init") + _, stderr, err := tf.Run("init", "-no-color") if err == nil { t.Fatal("expected error, got success") } @@ -351,7 +353,7 @@ func TestInitProviderNotFound(t *testing.T) { t.Fatal(err) } - _, stderr, err := tf.Run("init", "-plugin-dir="+pluginDir) + _, stderr, err := tf.Run("init", "-no-color", "-plugin-dir="+pluginDir) if err == nil { t.Fatal("expected error, got success") } @@ -360,6 +362,26 @@ func TestInitProviderNotFound(t *testing.T) { t.Errorf("expected error message is missing from output:\n%s", stderr) } }) + + t.Run("special characters enabled", func(t *testing.T) { + _, stderr, err := tf.Run("init") + if err == nil { + t.Fatal("expected error, got success") + } + + expectedErr := `╷ +│ Error: Failed to query available provider packages +│` + ` ` + ` +│ Could not retrieve the list of available versions for provider +│ hashicorp/nonexist: provider registry registry.terraform.io does not have a +│ provider named registry.terraform.io/hashicorp/nonexist +╵ + +` + if stripAnsi(stderr) != expectedErr { + t.Errorf("wrong output:\n%s", cmp.Diff(stripAnsi(stderr), expectedErr)) + } + }) } func TestInitProviderWarnings(t *testing.T) { @@ -373,13 +395,13 @@ func TestInitProviderWarnings(t *testing.T) { tf := 
e2e.NewBinary(terraformBin, fixturePath) defer tf.Close() - _, stderr, err := tf.Run("init") + stdout, _, err := tf.Run("init") if err == nil { t.Fatal("expected error, got success") } - if !strings.Contains(stderr, "This provider is archived and no longer needed. The terraform_remote_state\ndata source is built into the latest Terraform release.") { - t.Errorf("expected warning message is missing from output:\n%s", stderr) + if !strings.Contains(stdout, "This provider is archived and no longer needed.") { + t.Errorf("expected warning message is missing from output:\n%s", stdout) } } diff --git a/command/e2etest/primary_test.go b/command/e2etest/primary_test.go index 28aa3c451..304ddedc9 100644 --- a/command/e2etest/primary_test.go +++ b/command/e2etest/primary_test.go @@ -59,8 +59,8 @@ func TestPrimarySeparatePlan(t *testing.T) { t.Errorf("incorrect plan tally; want 1 to add:\n%s", stdout) } - if !strings.Contains(stdout, "This plan was saved to: tfplan") { - t.Errorf("missing \"This plan was saved to...\" message in plan output\n%s", stdout) + if !strings.Contains(stdout, "Saved the plan to: tfplan") { + t.Errorf("missing \"Saved the plan to...\" message in plan output\n%s", stdout) } if !strings.Contains(stdout, "terraform apply \"tfplan\"") { t.Errorf("missing next-step instruction in plan output\n%s", stdout) @@ -107,7 +107,7 @@ func TestPrimarySeparatePlan(t *testing.T) { stateResources := state.RootModule().Resources var gotResources []string - for n, _ := range stateResources { + for n := range stateResources { gotResources = append(gotResources, n) } sort.Strings(gotResources) @@ -154,13 +154,13 @@ func TestPrimaryChdirOption(t *testing.T) { defer tf.Close() //// INIT - stdout, stderr, err := tf.Run("-chdir=subdir", "init") + _, stderr, err := tf.Run("-chdir=subdir", "init") if err != nil { t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) } //// PLAN - stdout, stderr, err = tf.Run("-chdir=subdir", "plan", "-out=tfplan") + stdout, stderr, err 
:= tf.Run("-chdir=subdir", "plan", "-out=tfplan") if err != nil { t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) } @@ -169,8 +169,8 @@ func TestPrimaryChdirOption(t *testing.T) { t.Errorf("incorrect plan tally; want 0 to add:\n%s", stdout) } - if !strings.Contains(stdout, "This plan was saved to: tfplan") { - t.Errorf("missing \"This plan was saved to...\" message in plan output\n%s", stdout) + if !strings.Contains(stdout, "Saved the plan to: tfplan") { + t.Errorf("missing \"Saved the plan to...\" message in plan output\n%s", stdout) } if !strings.Contains(stdout, "terraform apply \"tfplan\"") { t.Errorf("missing next-step instruction in plan output\n%s", stdout) diff --git a/command/e2etest/provider_dev_test.go b/command/e2etest/provider_dev_test.go index 3c779c5a3..401eb779f 100644 --- a/command/e2etest/provider_dev_test.go +++ b/command/e2etest/provider_dev_test.go @@ -33,7 +33,7 @@ func TestProviderDevOverrides(t *testing.T) { // such as if it stops being buildable into an independent executable. providerExeDir := filepath.Join(tf.WorkDir(), "pkgdir") providerExePrefix := filepath.Join(providerExeDir, "terraform-provider-test_") - providerExe := e2e.GoBuild("github.com/hashicorp/terraform/builtin/bins/provider-test", providerExePrefix) + providerExe := e2e.GoBuild("github.com/hashicorp/terraform/internal/provider-simple/main", providerExePrefix) t.Logf("temporary provider executable is %s", providerExe) err := ioutil.WriteFile(filepath.Join(tf.WorkDir(), "dev.tfrc"), []byte(fmt.Sprintf(` @@ -61,7 +61,8 @@ func TestProviderDevOverrides(t *testing.T) { // dev overrides are always ready to use and don't need any special action // to "install" them. This test is mimicking the a happy path of going // directly from "go build" to validate/plan/apply without interacting - // with any registries, mirrors, lock files, etc. + // with any registries, mirrors, lock files, etc. 
To verify "terraform + // init" does actually show a warning, that behavior is tested at the end. stdout, stderr, err = tf.Run("validate") if err != nil { t.Fatalf("unexpected error: %s\n%s", err, stderr) @@ -70,7 +71,18 @@ func TestProviderDevOverrides(t *testing.T) { if got, want := stdout, `The configuration is valid, but`; !strings.Contains(got, want) { t.Errorf("stdout doesn't include the success message\nwant: %s\n%s", want, got) } - if got, want := stderr, `Provider development overrides are in effect`; !strings.Contains(got, want) { + if got, want := stdout, `Provider development overrides are in effect`; !strings.Contains(got, want) { t.Errorf("stdout doesn't include the warning about development overrides\nwant: %s\n%s", want, got) } + + stdout, stderr, err = tf.Run("init") + if err == nil { + t.Fatal("expected error: Failed to query available provider packages") + } + if got, want := stdout, `Provider development overrides are in effect`; !strings.Contains(got, want) { + t.Errorf("stdout doesn't include the warning about development overrides\nwant: %s\n%s", want, got) + } + if got, want := stderr, `Failed to query available provider packages`; !strings.Contains(got, want) { + t.Errorf("stderr doesn't include the error about listing unavailable development provider\nwant: %s\n%s", want, got) + } } diff --git a/command/e2etest/providers_mirror_test.go b/command/e2etest/providers_mirror_test.go index 32512a665..3d229e6b6 100644 --- a/command/e2etest/providers_mirror_test.go +++ b/command/e2etest/providers_mirror_test.go @@ -54,7 +54,10 @@ func TestTerraformProvidersMirror(t *testing.T) { "registry.terraform.io/hashicorp/template/terraform-provider-template_2.1.1_windows_386.zip", } var got []string - err = filepath.Walk(outputDir, func(path string, info os.FileInfo, err error) error { + walkErr := filepath.Walk(outputDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } if info.IsDir() { return nil // we only care 
about leaf files for this test } @@ -65,8 +68,8 @@ func TestTerraformProvidersMirror(t *testing.T) { got = append(got, filepath.ToSlash(relPath)) return nil }) - if err != nil { - t.Fatal(err) + if walkErr != nil { + t.Fatal(walkErr) } sort.Strings(got) diff --git a/command/e2etest/provisioner_plugin_test.go b/command/e2etest/provisioner_plugin_test.go new file mode 100644 index 000000000..d801eaef7 --- /dev/null +++ b/command/e2etest/provisioner_plugin_test.go @@ -0,0 +1,65 @@ +package e2etest + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/hashicorp/terraform/e2e" +) + +// TestProviderDevOverrides is a test that terraform can execute a 3rd party +// provisioner plugin. +func TestProvisionerPlugin(t *testing.T) { + t.Parallel() + + // This test reaches out to releases.hashicorp.com to download the + // template and null providers, so it can only run if network access is + // allowed. + skipIfCannotAccessNetwork(t) + + tf := e2e.NewBinary(terraformBin, "testdata/provisioner-plugin") + defer tf.Close() + + // In order to do a decent end-to-end test for this case we will need a + // real enough provisioner plugin to try to run and make sure we are able + // to actually run it. 
Here will build the local-exec provisioner into a + // binary called test-provisioner + provisionerExePrefix := filepath.Join(tf.WorkDir(), "terraform-provisioner-test_") + provisionerExe := e2e.GoBuild("github.com/hashicorp/terraform/internal/provisioner-local-exec/main", provisionerExePrefix) + + // provisioners must use the old binary name format, so rename this binary + newExe := filepath.Join(tf.WorkDir(), "terraform-provisioner-test") + if _, err := os.Stat(newExe); !os.IsNotExist(err) { + t.Fatalf("%q already exists", newExe) + } + if err := os.Rename(provisionerExe, newExe); err != nil { + t.Fatalf("error renaming provisioner binary: %v", err) + } + provisionerExe = newExe + + t.Logf("temporary provisioner executable is %s", provisionerExe) + + //// INIT + _, stderr, err := tf.Run("init") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + + //// PLAN + _, stderr, err = tf.Run("plan", "-out=tfplan") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } + + //// APPLY + stdout, stderr, err := tf.Run("apply", "tfplan") + if err != nil { + t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "HelloProvisioner") { + t.Fatalf("missing provisioner output:\n%s", stdout) + } +} diff --git a/command/e2etest/provisioner_test.go b/command/e2etest/provisioner_test.go new file mode 100644 index 000000000..39bfa3757 --- /dev/null +++ b/command/e2etest/provisioner_test.go @@ -0,0 +1,44 @@ +package e2etest + +import ( + "strings" + "testing" + + "github.com/hashicorp/terraform/e2e" +) + +// TestProviderDevOverrides is a test that terraform can execute a 3rd party +// provisioner plugin. +func TestProvisioner(t *testing.T) { + t.Parallel() + + // This test reaches out to releases.hashicorp.com to download the + // template and null providers, so it can only run if network access is + // allowed. 
+ skipIfCannotAccessNetwork(t) + + tf := e2e.NewBinary(terraformBin, "testdata/provisioner") + defer tf.Close() + + //// INIT + _, stderr, err := tf.Run("init") + if err != nil { + t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr) + } + + //// PLAN + _, stderr, err = tf.Run("plan", "-out=tfplan") + if err != nil { + t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr) + } + + //// APPLY + stdout, stderr, err := tf.Run("apply", "tfplan") + if err != nil { + t.Fatalf("unexpected apply error: %s\nstderr:\n%s", err, stderr) + } + + if !strings.Contains(stdout, "HelloProvisioner") { + t.Fatalf("missing provisioner output:\n%s", stdout) + } +} diff --git a/command/e2etest/strip_ansi.go b/command/e2etest/strip_ansi.go new file mode 100644 index 000000000..22b66bae3 --- /dev/null +++ b/command/e2etest/strip_ansi.go @@ -0,0 +1,13 @@ +package e2etest + +import ( + "regexp" +) + +const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))" + +var ansiRe = regexp.MustCompile(ansi) + +func stripAnsi(str string) string { + return ansiRe.ReplaceAllString(str, "") +} diff --git a/command/e2etest/testdata/provider-dev-override/provider-dev-override.tf b/command/e2etest/testdata/provider-dev-override/provider-dev-override.tf index 195cb1a3b..9c629f722 100644 --- a/command/e2etest/testdata/provider-dev-override/provider-dev-override.tf +++ b/command/e2etest/testdata/provider-dev-override/provider-dev-override.tf @@ -1,14 +1,11 @@ terraform { required_providers { - test = { + simple = { source = "example.com/test/test" version = "2.0.0" } } } -provider "test" { -} - -data "test_data_source" "test" { +data "simple_resource" "test" { } diff --git a/command/e2etest/testdata/provisioner-plugin/main.tf b/command/e2etest/testdata/provisioner-plugin/main.tf new file mode 100644 index 000000000..8e6268b96 --- /dev/null +++ b/command/e2etest/testdata/provisioner-plugin/main.tf @@ -0,0 
+1,5 @@ +resource "null_resource" "a" { + provisioner "test" { + command = "echo HelloProvisioner" + } +} diff --git a/command/e2etest/testdata/provisioner/main.tf b/command/e2etest/testdata/provisioner/main.tf new file mode 100644 index 000000000..c37ad380b --- /dev/null +++ b/command/e2etest/testdata/provisioner/main.tf @@ -0,0 +1,5 @@ +resource "null_resource" "a" { + provisioner "local-exec" { + command = "echo HelloProvisioner" + } +} diff --git a/command/e2etest/testdata/test-provider/main.tf b/command/e2etest/testdata/test-provider/main.tf index 864643ef6..a4de134c8 100644 --- a/command/e2etest/testdata/test-provider/main.tf +++ b/command/e2etest/testdata/test-provider/main.tf @@ -1,6 +1,10 @@ -provider "test" { - +terraform { + required_providers { + simple = { + source = "hashicorp/test" + } + } } -resource "test_resource_signal" "test" { +resource "simple_resource" "test" { } diff --git a/command/e2etest/unmanaged_test.go b/command/e2etest/unmanaged_test.go index ab8e19aa1..b32748ada 100644 --- a/command/e2etest/unmanaged_test.go +++ b/command/e2etest/unmanaged_test.go @@ -11,9 +11,9 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/builtin/providers/test" "github.com/hashicorp/terraform/e2e" - grpcplugin "github.com/hashicorp/terraform/helper/plugin" + "github.com/hashicorp/terraform/internal/grpcwrap" + simple "github.com/hashicorp/terraform/internal/provider-simple" proto "github.com/hashicorp/terraform/internal/tfplugin5" tfplugin "github.com/hashicorp/terraform/plugin" ) @@ -42,7 +42,7 @@ type reattachConfigAddr struct { type providerServer struct { sync.Mutex - *grpcplugin.GRPCProviderServer + proto.ProviderServer planResourceChangeCalled bool applyResourceChangeCalled bool } @@ -52,7 +52,7 @@ func (p *providerServer) PlanResourceChange(ctx context.Context, req *proto.Plan defer p.Unlock() p.planResourceChangeCalled = true - return p.GRPCProviderServer.PlanResourceChange(ctx, req) + return 
p.ProviderServer.PlanResourceChange(ctx, req) } func (p *providerServer) ApplyResourceChange(ctx context.Context, req *proto.ApplyResourceChange_Request) (*proto.ApplyResourceChange_Response, error) { @@ -60,7 +60,7 @@ func (p *providerServer) ApplyResourceChange(ctx context.Context, req *proto.App defer p.Unlock() p.applyResourceChangeCalled = true - return p.GRPCProviderServer.ApplyResourceChange(ctx, req) + return p.ProviderServer.ApplyResourceChange(ctx, req) } func (p *providerServer) PlanResourceChangeCalled() bool { @@ -99,7 +99,7 @@ func TestUnmanagedSeparatePlan(t *testing.T) { reattachCh := make(chan *plugin.ReattachConfig) closeCh := make(chan struct{}) provider := &providerServer{ - GRPCProviderServer: grpcplugin.NewGRPCProviderServerShim(test.Provider()), + ProviderServer: grpcwrap.Provider(simple.Provider()), } ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -140,6 +140,10 @@ func TestUnmanagedSeparatePlan(t *testing.T) { }, }, }) + if err != nil { + t.Fatal(err) + } + tf.AddEnv("TF_REATTACH_PROVIDERS=" + string(reattachStr)) tf.AddEnv("PLUGIN_PROTOCOL_VERSION=5") @@ -164,7 +168,7 @@ func TestUnmanagedSeparatePlan(t *testing.T) { } if !provider.PlanResourceChangeCalled() { - t.Error("PlanResourceChange not called on in-process provider") + t.Error("PlanResourceChange not called on un-managed provider") } //// APPLY @@ -174,7 +178,7 @@ func TestUnmanagedSeparatePlan(t *testing.T) { } if !provider.ApplyResourceChangeCalled() { - t.Error("ApplyResourceChange not called on in-process provider") + t.Error("ApplyResourceChange not called on un-managed provider") } provider.ResetApplyResourceChangeCalled() diff --git a/command/e2etest/version_test.go b/command/e2etest/version_test.go index 261877290..1f19ecf64 100644 --- a/command/e2etest/version_test.go +++ b/command/e2etest/version_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/hashicorp/terraform/e2e" - tfcore "github.com/hashicorp/terraform/terraform" + 
"github.com/hashicorp/terraform/version" ) func TestVersion(t *testing.T) { @@ -31,7 +31,7 @@ func TestVersion(t *testing.T) { t.Errorf("unexpected stderr output:\n%s", stderr) } - wantVersion := fmt.Sprintf("Terraform v%s", tfcore.VersionString()) + wantVersion := fmt.Sprintf("Terraform v%s", version.String()) if !strings.Contains(stdout, wantVersion) { t.Errorf("output does not contain our current version %q:\n%s", wantVersion, stdout) } @@ -63,7 +63,7 @@ func TestVersionWithProvider(t *testing.T) { t.Errorf("unexpected stderr output:\n%s", stderr) } - wantVersion := fmt.Sprintf("Terraform v%s", tfcore.VersionString()) + wantVersion := fmt.Sprintf("Terraform v%s", version.String()) if !strings.Contains(stdout, wantVersion) { t.Errorf("output does not contain our current version %q:\n%s", wantVersion, stdout) } diff --git a/command/flag_kv.go b/command/flag_kv.go index 9f38018af..b084c5135 100644 --- a/command/flag_kv.go +++ b/command/flag_kv.go @@ -3,11 +3,6 @@ package command import ( "fmt" "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/tfdiags" ) // FlagStringKV is a flag.Value implementation for parsing user variables @@ -46,34 +41,3 @@ func (v *FlagStringSlice) Set(raw string) error { return nil } - -// FlagTargetSlice is a flag.Value implementation for parsing target addresses -// from the command line, such as -target=aws_instance.foo -target=aws_vpc.bar . -type FlagTargetSlice []addrs.Targetable - -func (v *FlagTargetSlice) String() string { - return "" -} - -func (v *FlagTargetSlice) Set(raw string) error { - // FIXME: This is not an ideal way to deal with this because it requires - // us to do parsing in a context where we can't nicely return errors - // to the user. 
- - var diags tfdiags.Diagnostics - synthFilename := fmt.Sprintf("-target=%q", raw) - traversal, syntaxDiags := hclsyntax.ParseTraversalAbs([]byte(raw), synthFilename, hcl.Pos{Line: 1, Column: 1}) - diags = diags.Append(syntaxDiags) - if syntaxDiags.HasErrors() { - return diags.Err() - } - - target, targetDiags := addrs.ParseTarget(traversal) - diags = diags.Append(targetDiags) - if targetDiags.HasErrors() { - return diags.Err() - } - - *v = append(*v, target.Subject) - return nil -} diff --git a/command/format/diagnostic.go b/command/format/diagnostic.go index 38626e30b..d89de7a14 100644 --- a/command/format/diagnostic.go +++ b/command/format/diagnostic.go @@ -16,6 +16,11 @@ import ( "github.com/zclconf/go-cty/cty" ) +var disabledColorize = &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, +} + // Diagnostic formats a single diagnostic message. // // The width argument specifies at what column the diagnostic messages will @@ -31,11 +36,31 @@ func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *color var buf bytes.Buffer + // these leftRule* variables are markers for the beginning of the lines + // containing the diagnostic that are intended to help sighted users + // better understand the information heirarchy when diagnostics appear + // alongside other information or alongside other diagnostics. + // + // Without this, it seems (based on folks sharing incomplete messages when + // asking questions, or including extra content that's not part of the + // diagnostic) that some readers have trouble easily identifying which + // text belongs to the diagnostic and which does not. 
+ var leftRuleLine, leftRuleStart, leftRuleEnd string + var leftRuleWidth int // in visual character cells + switch diag.Severity() { case tfdiags.Error: - buf.WriteString(color.Color("\n[bold][red]Error: [reset]")) + buf.WriteString(color.Color("[bold][red]Error: [reset]")) + leftRuleLine = color.Color("[red]│[reset] ") + leftRuleStart = color.Color("[red]╷[reset]") + leftRuleEnd = color.Color("[red]╵[reset]") + leftRuleWidth = 2 case tfdiags.Warning: - buf.WriteString(color.Color("\n[bold][yellow]Warning: [reset]")) + buf.WriteString(color.Color("[bold][yellow]Warning: [reset]")) + leftRuleLine = color.Color("[yellow]│[reset] ") + leftRuleStart = color.Color("[yellow]╷[reset]") + leftRuleEnd = color.Color("[yellow]╵[reset]") + leftRuleWidth = 2 default: // Clear out any coloring that might be applied by Terraform's UI helper, // so our result is not context-sensitive. @@ -51,135 +76,90 @@ func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *color fmt.Fprintf(&buf, color.Color("[bold]%s[reset]\n\n"), desc.Summary) if sourceRefs.Subject != nil { - // We'll borrow HCL's range implementation here, because it has some - // handy features to help us produce a nice source code snippet. - highlightRange := sourceRefs.Subject.ToHCL() - snippetRange := highlightRange - if sourceRefs.Context != nil { - snippetRange = sourceRefs.Context.ToHCL() - } - - // Make sure the snippet includes the highlight. This should be true - // for any reasonable diagnostic, but we'll make sure. - snippetRange = hcl.RangeOver(snippetRange, highlightRange) - if snippetRange.Empty() { - snippetRange.End.Byte++ - snippetRange.End.Column++ - } - if highlightRange.Empty() { - highlightRange.End.Byte++ - highlightRange.End.Column++ - } - - var src []byte - if sources != nil { - src = sources[snippetRange.Filename] - } - if src == nil { - // This should generally not happen, as long as sources are always - // loaded through the main loader. 
We may load things in other - // ways in weird cases, so we'll tolerate it at the expense of - // a not-so-helpful error message. - fmt.Fprintf(&buf, " on %s line %d:\n (source code not available)\n", highlightRange.Filename, highlightRange.Start.Line) - } else { - file, offset := parseRange(src, highlightRange) - - headerRange := highlightRange - - contextStr := hcled.ContextString(file, offset-1) - if contextStr != "" { - contextStr = ", in " + contextStr - } - - fmt.Fprintf(&buf, " on %s line %d%s:\n", headerRange.Filename, headerRange.Start.Line, contextStr) - - // Config snippet rendering - sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines) - for sc.Scan() { - lineRange := sc.Range() - if !lineRange.Overlaps(snippetRange) { - continue - } - if !lineRange.Overlap(highlightRange).Empty() { - beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange) - before := beforeRange.SliceBytes(src) - highlighted := highlightedRange.SliceBytes(src) - after := afterRange.SliceBytes(src) - fmt.Fprintf( - &buf, color.Color("%4d: %s[underline]%s[reset]%s\n"), - lineRange.Start.Line, - before, highlighted, after, - ) - } else { - fmt.Fprintf( - &buf, "%4d: %s\n", - lineRange.Start.Line, - lineRange.SliceBytes(src), - ) - } - } - - } - - if fromExpr := diag.FromExpr(); fromExpr != nil { - // We may also be able to generate information about the dynamic - // values of relevant variables at the point of evaluation, then. - // This is particularly useful for expressions that get evaluated - // multiple times with different values, such as blocks using - // "count" and "for_each", or within "for" expressions. 
- expr := fromExpr.Expression - ctx := fromExpr.EvalContext - vars := expr.Variables() - stmts := make([]string, 0, len(vars)) - seen := make(map[string]struct{}, len(vars)) - Traversals: - for _, traversal := range vars { - for len(traversal) > 1 { - val, diags := traversal.TraverseAbs(ctx) - if diags.HasErrors() { - // Skip anything that generates errors, since we probably - // already have the same error in our diagnostics set - // already. - traversal = traversal[:len(traversal)-1] - continue - } - - traversalStr := traversalStr(traversal) - if _, exists := seen[traversalStr]; exists { - continue Traversals // don't show duplicates when the same variable is referenced multiple times - } - switch { - case !val.IsKnown(): - // Can't say anything about this yet, then. - continue Traversals - case val.IsNull(): - stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is null"), traversalStr)) - default: - stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is %s"), traversalStr, compactValueStr(val))) - } - seen[traversalStr] = struct{}{} - } - } - - sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly? 
- - if len(stmts) > 0 { - fmt.Fprint(&buf, color.Color(" [dark_gray]|----------------[reset]\n")) - } - for _, stmt := range stmts { - fmt.Fprintf(&buf, color.Color(" [dark_gray]|[reset] %s\n"), stmt) - } - } - - buf.WriteByte('\n') + buf = appendSourceSnippets(buf, diag, sources, color) } if desc.Detail != "" { - if width != 0 { + paraWidth := width - leftRuleWidth - 1 // leave room for the left rule + if paraWidth > 0 { lines := strings.Split(desc.Detail, "\n") for _, line := range lines { if !strings.HasPrefix(line, " ") { - line = wordwrap.WrapString(line, uint(width)) + line = wordwrap.WrapString(line, uint(paraWidth)) + } + fmt.Fprintf(&buf, "%s\n", line) + } + } else { + fmt.Fprintf(&buf, "%s\n", desc.Detail) + } + } + + // Before we return, we'll finally add the left rule prefixes to each + // line so that the overall message is visually delimited from what's + // around it. We'll do that by scanning over what we already generated + // and adding the prefix for each line. + var ruleBuf strings.Builder + sc := bufio.NewScanner(&buf) + ruleBuf.WriteString(leftRuleStart) + ruleBuf.WriteByte('\n') + for sc.Scan() { + line := sc.Text() + prefix := leftRuleLine + if line == "" { + // Don't print the space after the line if there would be nothing + // after it anyway. + prefix = strings.TrimSpace(prefix) + } + ruleBuf.WriteString(prefix) + ruleBuf.WriteString(line) + ruleBuf.WriteByte('\n') + } + ruleBuf.WriteString(leftRuleEnd) + ruleBuf.WriteByte('\n') + + return ruleBuf.String() +} + +// DiagnosticPlain is an alternative to Diagnostic which minimises the use of +// virtual terminal formatting sequences. +// +// It is intended for use in automation and other contexts in which diagnostic +// messages are parsed from the Terraform output. +func DiagnosticPlain(diag tfdiags.Diagnostic, sources map[string][]byte, width int) string { + if diag == nil { + // No good reason to pass a nil diagnostic in here... 
+ return "" + } + + var buf bytes.Buffer + + switch diag.Severity() { + case tfdiags.Error: + buf.WriteString("\nError: ") + case tfdiags.Warning: + buf.WriteString("\nWarning: ") + default: + buf.WriteString("\n") + } + + desc := diag.Description() + sourceRefs := diag.Source() + + // We don't wrap the summary, since we expect it to be terse, and since + // this is where we put the text of a native Go error it may not always + // be pure text that lends itself well to word-wrapping. + fmt.Fprintf(&buf, "%s\n\n", desc.Summary) + + if sourceRefs.Subject != nil { + buf = appendSourceSnippets(buf, diag, sources, disabledColorize) + } + + if desc.Detail != "" { + if width > 1 { + lines := strings.Split(desc.Detail, "\n") + for _, line := range lines { + if !strings.HasPrefix(line, " ") { + line = wordwrap.WrapString(line, uint(width-1)) } fmt.Fprintf(&buf, "%s\n", line) } @@ -356,3 +336,139 @@ func compactValueStr(val cty.Value) string { return ty.FriendlyName() } } + +func appendSourceSnippets(buf bytes.Buffer, diag tfdiags.Diagnostic, sources map[string][]byte, color *colorstring.Colorize) bytes.Buffer { + sourceRefs := diag.Source() + + // We'll borrow HCL's range implementation here, because it has some + // handy features to help us produce a nice source code snippet. + highlightRange := sourceRefs.Subject.ToHCL() + snippetRange := highlightRange + if sourceRefs.Context != nil { + snippetRange = sourceRefs.Context.ToHCL() + } + + // Make sure the snippet includes the highlight. This should be true + // for any reasonable diagnostic, but we'll make sure. 
+ snippetRange = hcl.RangeOver(snippetRange, highlightRange) + if snippetRange.Empty() { + snippetRange.End.Byte++ + snippetRange.End.Column++ + } + if highlightRange.Empty() { + highlightRange.End.Byte++ + highlightRange.End.Column++ + } + + var src []byte + if sources != nil { + src = sources[snippetRange.Filename] + } + if src == nil { + // This should generally not happen, as long as sources are always + // loaded through the main loader. We may load things in other + // ways in weird cases, so we'll tolerate it at the expense of + // a not-so-helpful error message. + fmt.Fprintf(&buf, " on %s line %d:\n (source code not available)\n", highlightRange.Filename, highlightRange.Start.Line) + } else { + file, offset := parseRange(src, highlightRange) + + headerRange := highlightRange + + contextStr := hcled.ContextString(file, offset-1) + if contextStr != "" { + contextStr = ", in " + contextStr + } + + fmt.Fprintf(&buf, " on %s line %d%s:\n", headerRange.Filename, headerRange.Start.Line, contextStr) + + // Config snippet rendering + sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines) + for sc.Scan() { + lineRange := sc.Range() + if !lineRange.Overlaps(snippetRange) { + continue + } + if !lineRange.Overlap(highlightRange).Empty() { + beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange) + before := beforeRange.SliceBytes(src) + highlighted := highlightedRange.SliceBytes(src) + after := afterRange.SliceBytes(src) + fmt.Fprintf( + &buf, color.Color("%4d: %s[underline]%s[reset]%s\n"), + lineRange.Start.Line, + before, highlighted, after, + ) + } else { + fmt.Fprintf( + &buf, "%4d: %s\n", + lineRange.Start.Line, + lineRange.SliceBytes(src), + ) + } + } + + } + + if fromExpr := diag.FromExpr(); fromExpr != nil { + // We may also be able to generate information about the dynamic + // values of relevant variables at the point of evaluation, then. 
+ // This is particularly useful for expressions that get evaluated + // multiple times with different values, such as blocks using + // "count" and "for_each", or within "for" expressions. + expr := fromExpr.Expression + ctx := fromExpr.EvalContext + vars := expr.Variables() + stmts := make([]string, 0, len(vars)) + seen := make(map[string]struct{}, len(vars)) + Traversals: + for _, traversal := range vars { + for len(traversal) > 1 { + val, diags := traversal.TraverseAbs(ctx) + if diags.HasErrors() { + // Skip anything that generates errors, since we probably + // already have the same error in our diagnostics set + // already. + traversal = traversal[:len(traversal)-1] + continue + } + + traversalStr := traversalStr(traversal) + if _, exists := seen[traversalStr]; exists { + continue Traversals // don't show duplicates when the same variable is referenced multiple times + } + switch { + case val.IsMarked(): + // We won't say anything at all about sensitive values, + // because we might give away something that was + // sensitive about them. + stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] has a sensitive value"), traversalStr)) + case !val.IsKnown(): + if ty := val.Type(); ty != cty.DynamicPseudoType { + stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is a %s, known only after apply"), traversalStr, ty.FriendlyName())) + } else { + stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] will be known only after apply"), traversalStr)) + } + case val.IsNull(): + stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is null"), traversalStr)) + default: + stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is %s"), traversalStr, compactValueStr(val))) + } + seen[traversalStr] = struct{}{} + } + } + + sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly? 
+ + if len(stmts) > 0 { + fmt.Fprint(&buf, color.Color(" [dark_gray]├────────────────[reset]\n")) + } + for _, stmt := range stmts { + fmt.Fprintf(&buf, color.Color(" [dark_gray]│[reset] %s\n"), stmt) + } + } + + buf.WriteByte('\n') + + return buf +} diff --git a/command/format/diagnostic_test.go b/command/format/diagnostic_test.go index 2062f633c..cfbe27191 100644 --- a/command/format/diagnostic_test.go +++ b/command/format/diagnostic_test.go @@ -1,15 +1,441 @@ package format import ( + "strings" "testing" "github.com/google/go-cmp/cmp" "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcltest" "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" "github.com/hashicorp/terraform/tfdiags" ) +func TestDiagnostic(t *testing.T) { + + tests := map[string]struct { + Diag interface{} + Want string + }{ + "sourceless error": { + tfdiags.Sourceless( + tfdiags.Error, + "A sourceless error", + "It has no source references but it does have a pretty long detail that should wrap over multiple lines.", + ), + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]A sourceless error[reset] +[red]│[reset] +[red]│[reset] It has no source references but it +[red]│[reset] does have a pretty long detail that +[red]│[reset] should wrap over multiple lines. +[red]╵[reset] +`, + }, + "sourceless warning": { + tfdiags.Sourceless( + tfdiags.Warning, + "A sourceless warning", + "It has no source references but it does have a pretty long detail that should wrap over multiple lines.", + ), + `[yellow]╷[reset] +[yellow]│[reset] [bold][yellow]Warning: [reset][bold]A sourceless warning[reset] +[yellow]│[reset] +[yellow]│[reset] It has no source references but it +[yellow]│[reset] does have a pretty long detail that +[yellow]│[reset] should wrap over multiple lines. 
+[yellow]╵[reset] +`, + }, + "error with source code subject": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] +[red]│[reset] Whatever shall we do? +[red]╵[reset] +`, + }, + "error with source code subject and known expression": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.StringVal("blah"), + }), + }, + }, + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] [dark_gray]├────────────────[reset] +[red]│[reset] [dark_gray]│[reset] [bold]boop.beep[reset] is "blah" +[red]│[reset] +[red]│[reset] Whatever shall we do? 
+[red]╵[reset] +`, + }, + "error with source code subject and expression referring to sensitive value": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.StringVal("blah").Mark("sensitive"), + }), + }, + }, + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] [dark_gray]├────────────────[reset] +[red]│[reset] [dark_gray]│[reset] [bold]boop.beep[reset] has a sensitive value +[red]│[reset] +[red]│[reset] Whatever shall we do? 
+[red]╵[reset] +`, + }, + "error with source code subject and unknown string expression": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.UnknownVal(cty.String), + }), + }, + }, + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] [dark_gray]├────────────────[reset] +[red]│[reset] [dark_gray]│[reset] [bold]boop.beep[reset] is a string, known only after apply +[red]│[reset] +[red]│[reset] Whatever shall we do? 
+[red]╵[reset] +`, + }, + "error with source code subject and unknown expression of unknown type": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.UnknownVal(cty.DynamicPseudoType), + }), + }, + }, + }, + `[red]╷[reset] +[red]│[reset] [bold][red]Error: [reset][bold]Bad bad bad[reset] +[red]│[reset] +[red]│[reset] on test.tf line 1: +[red]│[reset] 1: test [underline]source[reset] code +[red]│[reset] [dark_gray]├────────────────[reset] +[red]│[reset] [dark_gray]│[reset] [bold]boop.beep[reset] will be known only after apply +[red]│[reset] +[red]│[reset] Whatever shall we do? +[red]╵[reset] +`, + }, + } + + sources := map[string][]byte{ + "test.tf": []byte(`test source code`), + } + + // This empty Colorize just passes through all of the formatting codes + // untouched, because it doesn't define any formatting keywords. 
+ colorize := &colorstring.Colorize{} + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var diags tfdiags.Diagnostics + diags = diags.Append(test.Diag) // to normalize it into a tfdiag.Diagnostic + diag := diags[0] + got := strings.TrimSpace(Diagnostic(diag, sources, colorize, 40)) + want := strings.TrimSpace(test.Want) + if got != want { + t.Errorf("wrong result\ngot:\n%s\n\nwant:\n%s\n\n", got, want) + } + }) + } +} + +func TestDiagnosticPlain(t *testing.T) { + + tests := map[string]struct { + Diag interface{} + Want string + }{ + "sourceless error": { + tfdiags.Sourceless( + tfdiags.Error, + "A sourceless error", + "It has no source references but it does have a pretty long detail that should wrap over multiple lines.", + ), + ` +Error: A sourceless error + +It has no source references but it does +have a pretty long detail that should +wrap over multiple lines. +`, + }, + "sourceless warning": { + tfdiags.Sourceless( + tfdiags.Warning, + "A sourceless warning", + "It has no source references but it does have a pretty long detail that should wrap over multiple lines.", + ), + ` +Warning: A sourceless warning + +It has no source references but it does +have a pretty long detail that should +wrap over multiple lines. +`, + }, + "error with source code subject": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + +Whatever shall we do? 
+`, + }, + "error with source code subject and known expression": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.StringVal("blah"), + }), + }, + }, + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + ├──────────────── + │ boop.beep is "blah" + +Whatever shall we do? +`, + }, + "error with source code subject and expression referring to sensitive value": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.StringVal("blah").Mark("sensitive"), + }), + }, + }, + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + ├──────────────── + │ boop.beep has a sensitive value + +Whatever shall we do? 
+`, + }, + "error with source code subject and unknown string expression": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.UnknownVal(cty.String), + }), + }, + }, + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + ├──────────────── + │ boop.beep is a string, known only after apply + +Whatever shall we do? +`, + }, + "error with source code subject and unknown expression of unknown type": { + &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Bad bad bad", + Detail: "Whatever shall we do?", + Subject: &hcl.Range{ + Filename: "test.tf", + Start: hcl.Pos{Line: 1, Column: 6, Byte: 5}, + End: hcl.Pos{Line: 1, Column: 12, Byte: 11}, + }, + Expression: hcltest.MockExprTraversal(hcl.Traversal{ + hcl.TraverseRoot{Name: "boop"}, + hcl.TraverseAttr{Name: "beep"}, + }), + EvalContext: &hcl.EvalContext{ + Variables: map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "beep": cty.UnknownVal(cty.DynamicPseudoType), + }), + }, + }, + }, + ` +Error: Bad bad bad + + on test.tf line 1: + 1: test source code + ├──────────────── + │ boop.beep will be known only after apply + +Whatever shall we do? 
+`, + }, + } + + sources := map[string][]byte{ + "test.tf": []byte(`test source code`), + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var diags tfdiags.Diagnostics + diags = diags.Append(test.Diag) // to normalize it into a tfdiag.Diagnostic + diag := diags[0] + got := strings.TrimSpace(DiagnosticPlain(diag, sources, 40)) + want := strings.TrimSpace(test.Want) + if got != want { + t.Errorf("wrong result\ngot:\n%s\n\nwant:\n%s\n\n", got, want) + } + }) + } +} + func TestDiagnosticWarningsCompact(t *testing.T) { var diags tfdiags.Diagnostics diags = diags.Append(tfdiags.SimpleWarning("foo")) @@ -103,16 +529,17 @@ func TestDiagnostic_nonOverlappingHighlightContext(t *testing.T) { Reset: true, Disable: true, } - expected := ` -Error: Some error - - on source.tf line 1: - 1: x = somefunc("testing", { - 2: alpha = "foo" - 3: beta = "bar" - 4: }) - -... + expected := `╷ +│ Error: Some error +│ +│ on source.tf line 1: +│ 1: x = somefunc("testing", { +│ 2: alpha = "foo" +│ 3: beta = "bar" +│ 4: }) +│ +│ ... +╵ ` output := Diagnostic(diags[0], sources, color, 80) @@ -151,6 +578,50 @@ func TestDiagnostic_emptyOverlapHighlightContext(t *testing.T) { Reset: true, Disable: true, } + expected := `╷ +│ Error: Some error +│ +│ on source.tf line 3, in variable "x": +│ 2: default = { +│ 3: "foo" +│ 4: } +│ +│ ... 
+╵ +` + output := Diagnostic(diags[0], sources, color, 80) + + if output != expected { + t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) + } +} + +func TestDiagnosticPlain_emptyOverlapHighlightContext(t *testing.T) { + var diags tfdiags.Diagnostics + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Some error", + Detail: "...", + Subject: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 3, Column: 10, Byte: 38}, + End: hcl.Pos{Line: 4, Column: 1, Byte: 39}, + }, + Context: &hcl.Range{ + Filename: "source.tf", + Start: hcl.Pos{Line: 2, Column: 13, Byte: 27}, + End: hcl.Pos{Line: 4, Column: 1, Byte: 39}, + }, + }) + sources := map[string][]byte{ + "source.tf": []byte(`variable "x" { + default = { + "foo" + } +`), + } + expected := ` Error: Some error @@ -161,7 +632,7 @@ Error: Some error ... ` - output := Diagnostic(diags[0], sources, color, 80) + output := DiagnosticPlain(diags[0], sources, 80) if output != expected { t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) @@ -181,6 +652,35 @@ func TestDiagnostic_wrapDetailIncludingCommand(t *testing.T) { Reset: true, Disable: true, } + expected := `╷ +│ Error: Everything went wrong +│ +│ This is a very long sentence about whatever went wrong which is supposed +│ to wrap onto multiple lines. Thank-you very much for listening. +│ +│ To fix this, run this very long command: +│ terraform read-my-mind -please -thanks -but-do-not-wrap-this-line-because-it-is-prefixed-with-spaces +│ +│ Here is a coda which is also long enough to wrap and so it should +│ eventually make it onto multiple lines. 
THE END +╵ +` + output := Diagnostic(diags[0], nil, color, 76) + + if output != expected { + t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) + } +} + +func TestDiagnosticPlain_wrapDetailIncludingCommand(t *testing.T) { + var diags tfdiags.Diagnostics + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Everything went wrong", + Detail: "This is a very long sentence about whatever went wrong which is supposed to wrap onto multiple lines. Thank-you very much for listening.\n\nTo fix this, run this very long command:\n terraform read-my-mind -please -thanks -but-do-not-wrap-this-line-because-it-is-prefixed-with-spaces\n\nHere is a coda which is also long enough to wrap and so it should eventually make it onto multiple lines. THE END", + }) + expected := ` Error: Everything went wrong @@ -190,10 +690,10 @@ wrap onto multiple lines. Thank-you very much for listening. To fix this, run this very long command: terraform read-my-mind -please -thanks -but-do-not-wrap-this-line-because-it-is-prefixed-with-spaces -Here is a coda which is also long enough to wrap and so it should eventually -make it onto multiple lines. THE END +Here is a coda which is also long enough to wrap and so it should +eventually make it onto multiple lines. 
THE END ` - output := Diagnostic(diags[0], nil, color, 76) + output := DiagnosticPlain(diags[0], nil, 76) if output != expected { t.Fatalf("unexpected output: got:\n%s\nwant\n%s\n", output, expected) diff --git a/command/format/diff.go b/command/format/diff.go index 7a3df5f09..abf42c6f9 100644 --- a/command/format/diff.go +++ b/command/format/diff.go @@ -14,7 +14,6 @@ import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/helper/experiment" "github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/plans/objchange" "github.com/hashicorp/terraform/states" @@ -99,7 +98,6 @@ func ResourceChange( color: color, action: change.Action, requiredReplace: change.RequiredReplace, - concise: experiment.Enabled(experiment.X_concise_diff), } // Most commonly-used resources have nested blocks that result in us @@ -154,10 +152,9 @@ func OutputChanges( ) string { var buf bytes.Buffer p := blockBodyDiffPrinter{ - buf: &buf, - color: color, - action: plans.Update, // not actually used in this case, because we're not printing a containing block - concise: experiment.Enabled(experiment.X_concise_diff), + buf: &buf, + color: color, + action: plans.Update, // not actually used in this case, because we're not printing a containing block } // We're going to reuse the codepath we used for printing resource block @@ -200,7 +197,8 @@ type blockBodyDiffPrinter struct { color *colorstring.Colorize action plans.Action requiredReplace cty.PathSet - concise bool + // verbose is set to true when using the "diff" printer to format state + verbose bool } type blockBodyDiffResult struct { @@ -326,7 +324,7 @@ func (p *blockBodyDiffPrinter) writeAttrDiff(name string, attrS *configschema.At path = append(path, cty.GetAttrStep{Name: name}) action, showJustNew := getPlanActionAndShow(old, new) - if action == plans.NoOp && p.concise && !identifyingAttribute(name, attrS) { + if action == plans.NoOp && !p.verbose && 
!identifyingAttribute(name, attrS) { return true } @@ -620,11 +618,10 @@ func (p *blockBodyDiffPrinter) writeSensitiveNestedBlockDiff(name string, old, n p.buf.WriteRune('\n') p.buf.WriteString(strings.Repeat(" ", indent+2)) p.buf.WriteString("}") - return } func (p *blockBodyDiffPrinter) writeNestedBlockDiff(name string, label *string, blockS *configschema.Block, action plans.Action, old, new cty.Value, indent int, path cty.Path) bool { - if action == plans.NoOp && p.concise { + if action == plans.NoOp && !p.verbose { return true } @@ -878,7 +875,7 @@ func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, pa } } - if strings.Index(oldS, "\n") < 0 && strings.Index(newS, "\n") < 0 { + if !strings.Contains(oldS, "\n") && !strings.Contains(newS, "\n") { break } @@ -904,23 +901,35 @@ func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, pa } } - diffLines := ctySequenceDiff(oldLines, newLines) - for _, diffLine := range diffLines { - p.buf.WriteString(strings.Repeat(" ", indent+2)) - p.writeActionSymbol(diffLine.Action) - - switch diffLine.Action { - case plans.NoOp, plans.Delete: - p.buf.WriteString(diffLine.Before.AsString()) - case plans.Create: - p.buf.WriteString(diffLine.After.AsString()) - default: - // Should never happen since the above covers all - // actions that ctySequenceDiff can return for strings - p.buf.WriteString(diffLine.After.AsString()) - + // Optimization for strings which are exactly equal: just print + // directly without calculating the sequence diff. This makes a + // significant difference when this code path is reached via a + // writeValue call with a large multi-line string. 
+ if oldS == newS { + for _, line := range newLines { + p.buf.WriteString(strings.Repeat(" ", indent+4)) + p.buf.WriteString(line.AsString()) + p.buf.WriteString("\n") + } + } else { + diffLines := ctySequenceDiff(oldLines, newLines) + for _, diffLine := range diffLines { + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(diffLine.Action) + + switch diffLine.Action { + case plans.NoOp, plans.Delete: + p.buf.WriteString(diffLine.Before.AsString()) + case plans.Create: + p.buf.WriteString(diffLine.After.AsString()) + default: + // Should never happen since the above covers all + // actions that ctySequenceDiff can return for strings + p.buf.WriteString(diffLine.After.AsString()) + + } + p.buf.WriteString("\n") } - p.buf.WriteString("\n") } p.buf.WriteString(strings.Repeat(" ", indent)) // +4 here because there's no symbol @@ -984,7 +993,7 @@ func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, pa action = plans.NoOp } - if action == plans.NoOp && p.concise { + if action == plans.NoOp && !p.verbose { suppressedElements++ continue } @@ -1024,8 +1033,7 @@ func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, pa var changeShown bool for i := 0; i < len(elemDiffs); i++ { - // In concise mode, push any no-op diff elements onto the stack - if p.concise { + if !p.verbose { for i < len(elemDiffs) && elemDiffs[i].Action == plans.NoOp { suppressedElements = append(suppressedElements, elemDiffs[i]) i++ @@ -1053,7 +1061,6 @@ func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, pa if hidden > 0 && i < len(elemDiffs) { hidden-- nextContextDiff = suppressedElements[hidden] - suppressedElements = suppressedElements[:hidden] } // If there are still hidden elements, show an elision @@ -1161,7 +1168,7 @@ func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, pa } } - if action == plans.NoOp && p.concise { + if action == plans.NoOp && !p.verbose { suppressedElements++ 
continue } @@ -1262,7 +1269,7 @@ func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, pa action = plans.Update } - if action == plans.NoOp && p.concise { + if action == plans.NoOp && !p.verbose { suppressedElements++ continue } diff --git a/command/format/diff_test.go b/command/format/diff_test.go index 076b1a9a2..db2d0b7de 100644 --- a/command/format/diff_test.go +++ b/command/format/diff_test.go @@ -7,7 +7,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/helper/experiment" "github.com/hashicorp/terraform/plans" "github.com/mitchellh/colorstring" "github.com/zclconf/go-cty/cty" @@ -362,13 +361,6 @@ new line ~ str = "before" -> "after" # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "blah" -> (known after apply) - password = (sensitive value) - ~ str = "before" -> "after" - } `, }, @@ -502,18 +494,6 @@ new line } # (2 unchanged attributes hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - bar = "bar" - foo = "foo" - id = "i-02ae66f368e8518a9" - name = "alice" - tags = { - "name" = "bob" - } - } `, }, } @@ -594,19 +574,6 @@ func TestResourceChange_JSON(t *testing.T) { ) } `, - - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - aaa = "value" - + bbb = "new_value" - - ccc = 5 -> null - } - ) - } -`, }, "in-place update (from empty tuple)": { Action: plans.Update, @@ -739,17 +706,6 @@ func TestResourceChange_JSON(t *testing.T) { } # forces replacement ) } -`, - VerboseOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - 
~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ { - aaa = "value" - + bbb = "new_value" - } # forces replacement - ) - } `, }, "in-place update (whitespace change)": { @@ -871,18 +827,6 @@ func TestResourceChange_JSON(t *testing.T) { ] ) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ [ - "first", - "second", - - "third", - ] - ) - } `, }, "JSON list item addition": { @@ -916,19 +860,6 @@ func TestResourceChange_JSON(t *testing.T) { ) } `, - - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ json_field = jsonencode( - ~ [ - "first", - "second", - + "third", - ] - ) - } -`, }, "JSON list object addition": { Action: plans.Update, @@ -1223,15 +1154,6 @@ func TestResourceChange_primitiveList(t *testing.T) { ] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + list_field = [ - + "new-element", - ] - } `, }, "in-place update - first addition": { @@ -1266,15 +1188,6 @@ func TestResourceChange_primitiveList(t *testing.T) { ] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - + "new-element", - ] - } `, }, "in-place update - insertion": { @@ -1324,20 +1237,6 @@ func TestResourceChange_primitiveList(t *testing.T) { ] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = 
"i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - "aaaa", - "bbbb", - + "cccc", - "dddd", - "eeee", - "ffff", - ] - } `, }, "force-new update - insertion": { @@ -1381,17 +1280,6 @@ func TestResourceChange_primitiveList(t *testing.T) { ] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ # forces replacement - "aaaa", - + "bbbb", - "cccc", - ] - } `, }, "in-place update - deletion": { @@ -1438,19 +1326,6 @@ func TestResourceChange_primitiveList(t *testing.T) { ] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - - "aaaa", - "bbbb", - - "cccc", - "dddd", - "eeee", - ] - } `, }, "creation - empty list": { @@ -1515,17 +1390,6 @@ func TestResourceChange_primitiveList(t *testing.T) { ] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - - "aaaa", - - "bbbb", - - "cccc", - ] - } `, }, "in-place update - null to empty": { @@ -1556,13 +1420,6 @@ func TestResourceChange_primitiveList(t *testing.T) { + list_field = [] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + list_field = [] - } `, }, "update to unknown element": { @@ -1606,18 +1463,6 @@ func TestResourceChange_primitiveList(t *testing.T) { ] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource 
"test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - "aaaa", - - "bbbb", - + (known after apply), - "cccc", - ] - } `, }, "update - two new unknown elements": { @@ -1668,21 +1513,6 @@ func TestResourceChange_primitiveList(t *testing.T) { ] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ list_field = [ - "aaaa", - - "bbbb", - + (known after apply), - + (known after apply), - "cccc", - "dddd", - "eeee", - ] - } `, }, } @@ -1734,19 +1564,6 @@ func TestResourceChange_primitiveTuple(t *testing.T) { # (1 unchanged element hidden) ] } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - id = "i-02ae66f368e8518a9" - ~ tuple_field = [ - "aaaa", - "bbbb", - - "dddd", - + "cccc", - "eeee", - "ffff", - ] - } `, }, } @@ -1787,15 +1604,6 @@ func TestResourceChange_primitiveSet(t *testing.T) { ] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + set_field = [ - + "new-element", - ] - } `, }, "in-place update - first insertion": { @@ -1830,15 +1638,6 @@ func TestResourceChange_primitiveSet(t *testing.T) { ] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - + "new-element", - ] - } `, }, "in-place update - insertion": { @@ -1879,17 +1678,6 @@ func TestResourceChange_primitiveSet(t *testing.T) { ] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource 
"test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - "aaaa", - + "bbbb", - "cccc", - ] - } `, }, "force-new update - insertion": { @@ -1932,17 +1720,6 @@ func TestResourceChange_primitiveSet(t *testing.T) { ] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ # forces replacement - "aaaa", - + "bbbb", - "cccc", - ] - } `, }, "in-place update - deletion": { @@ -1983,17 +1760,6 @@ func TestResourceChange_primitiveSet(t *testing.T) { ] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - - "aaaa", - "bbbb", - - "cccc", - ] - } `, }, "creation - empty set": { @@ -2055,16 +1821,6 @@ func TestResourceChange_primitiveSet(t *testing.T) { ] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - - "aaaa", - - "bbbb", - ] - } `, }, "in-place update - null to empty set": { @@ -2095,13 +1851,6 @@ func TestResourceChange_primitiveSet(t *testing.T) { + set_field = [] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + set_field = [] - } `, }, "in-place update to unknown": { @@ -2138,16 +1887,6 @@ func TestResourceChange_primitiveSet(t *testing.T) { ] -> (known after apply) # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ 
resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - - "aaaa", - - "bbbb", - ] -> (known after apply) - } `, }, "in-place update to unknown element": { @@ -2188,17 +1927,6 @@ func TestResourceChange_primitiveSet(t *testing.T) { ] # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ set_field = [ - "aaaa", - - "bbbb", - ~ (known after apply), - ] - } `, }, } @@ -2239,15 +1967,6 @@ func TestResourceChange_map(t *testing.T) { } # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - + map_field = { - + "new-key" = "new-element" - } - } `, }, "in-place update - first insertion": { @@ -2282,15 +2001,6 @@ func TestResourceChange_map(t *testing.T) { } # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ map_field = { - + "new-key" = "new-element" - } - } `, }, "in-place update - insertion": { @@ -2331,17 +2041,6 @@ func TestResourceChange_map(t *testing.T) { } # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ map_field = { - "a" = "aaaa" - + "b" = "bbbb" - "c" = "cccc" - } - } `, }, "force-new update - insertion": { @@ -2384,17 +2083,6 @@ func TestResourceChange_map(t *testing.T) { } # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" 
"example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ map_field = { # forces replacement - "a" = "aaaa" - + "b" = "bbbb" - "c" = "cccc" - } - } `, }, "in-place update - deletion": { @@ -2435,17 +2123,6 @@ func TestResourceChange_map(t *testing.T) { } # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ map_field = { - - "a" = "aaaa" -> null - "b" = "bbbb" - - "c" = "cccc" -> null - } - } `, }, "creation - empty": { @@ -2513,17 +2190,6 @@ func TestResourceChange_map(t *testing.T) { } # (1 unchanged attribute hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ami = "ami-STATIC" - ~ id = "i-02ae66f368e8518a9" -> (known after apply) - ~ map_field = { - "a" = "aaaa" - ~ "b" = "bbbb" -> (known after apply) - "c" = "cccc" - } - } `, }, } @@ -2582,16 +2248,6 @@ func TestResourceChange_nestedList(t *testing.T) { # (1 unchanged block hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - - root_block_device { - volume_type = "gp2" - } - } `, }, "in-place update - creation": { @@ -2756,17 +2412,6 @@ func TestResourceChange_nestedList(t *testing.T) { # (1 unchanged attribute hidden) } } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - - ~ root_block_device { - + new_field = "new_value" - volume_type = "gp2" - } - } `, }, "force-new update (inside block)": { @@ -3344,17 +2989,6 @@ func TestResourceChange_nestedMap(t *testing.T) { # (1 unchanged attribute hidden) } } -`, - VerboseOutput: ` # test_instance.example will be updated 
in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - - ~ root_block_device "a" { - + new_field = "new_value" - volume_type = "gp2" - } - } `, }, "in-place update - insertion": { @@ -3422,20 +3056,6 @@ func TestResourceChange_nestedMap(t *testing.T) { } # (1 unchanged block hidden) } -`, - VerboseOutput: ` # test_instance.example will be updated in-place - ~ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - - root_block_device "a" { - volume_type = "gp2" - } - + root_block_device "b" { - + new_field = "new_value" - + volume_type = "gp2" - } - } `, }, "force-new update (whole block)": { @@ -3500,19 +3120,6 @@ func TestResourceChange_nestedMap(t *testing.T) { } # (1 unchanged block hidden) } -`, - VerboseOutput: ` # test_instance.example must be replaced --/+ resource "test_instance" "example" { - ~ ami = "ami-BEFORE" -> "ami-AFTER" - id = "i-02ae66f368e8518a9" - - ~ root_block_device "a" { # forces replacement - ~ volume_type = "gp2" -> "different" - } - root_block_device "b" { - volume_type = "standard" - } - } `, }, "in-place update - deletion": { @@ -4512,10 +4119,6 @@ type testCase struct { RequiredReplace cty.PathSet Tainted bool ExpectedOutput string - - // This field and all associated values can be removed if the concise diff - // experiment succeeds. - VerboseOutput string } func runTestCases(t *testing.T, testCases map[string]testCase) { @@ -4569,25 +4172,11 @@ func runTestCases(t *testing.T, testCases map[string]testCase) { RequiredReplace: tc.RequiredReplace, } - experiment.SetEnabled(experiment.X_concise_diff, true) output := ResourceChange(change, tc.Tainted, tc.Schema, color) if output != tc.ExpectedOutput { t.Errorf("Unexpected diff.\ngot:\n%s\nwant:\n%s\n", output, tc.ExpectedOutput) t.Errorf("%s", cmp.Diff(output, tc.ExpectedOutput)) } - - // Temporary coverage for verbose diff behaviour. 
All lines below - // in this function can be removed if the concise diff experiment - // succeeds. - if tc.VerboseOutput == "" { - return - } - experiment.SetEnabled(experiment.X_concise_diff, false) - output = ResourceChange(change, tc.Tainted, tc.Schema, color) - if output != tc.VerboseOutput { - t.Errorf("Unexpected diff.\ngot:\n%s\nwant:\n%s\n", output, tc.VerboseOutput) - t.Errorf("%s", cmp.Diff(output, tc.VerboseOutput)) - } }) } } @@ -4694,7 +4283,6 @@ func TestOutputChanges(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - experiment.SetEnabled(experiment.X_concise_diff, true) output := OutputChanges(tc.changes, color) if output != tc.output { t.Errorf("Unexpected diff.\ngot:\n%s\nwant:\n%s\n", output, tc.output) diff --git a/command/format/state.go b/command/format/state.go index 6d6e2cee2..9fcb41f1c 100644 --- a/command/format/state.go +++ b/command/format/state.go @@ -45,9 +45,10 @@ func State(opts *StateOpts) string { buf := bytes.NewBufferString("[reset]") p := blockBodyDiffPrinter{ - buf: buf, - color: opts.Color, - action: plans.NoOp, + buf: buf, + color: opts.Color, + action: plans.NoOp, + verbose: true, } // Format all the modules @@ -74,7 +75,11 @@ func State(opts *StateOpts) string { for _, k := range ks { v := m.OutputValues[k] p.buf.WriteString(fmt.Sprintf("%s = ", k)) - p.writeValue(v.Value, plans.NoOp, 0) + if v.Sensitive { + p.buf.WriteString("(sensitive value)") + } else { + p.writeValue(v.Value, plans.NoOp, 0) + } p.buf.WriteString("\n") } } @@ -209,116 +214,3 @@ func formatStateModule(p blockBodyDiffPrinter, m *states.Module, schemas *terraf } p.buf.WriteString("\n") } - -func formatNestedList(indent string, outputList []interface{}) string { - outputBuf := new(bytes.Buffer) - outputBuf.WriteString(fmt.Sprintf("%s[", indent)) - - lastIdx := len(outputList) - 1 - - for i, value := range outputList { - outputBuf.WriteString(fmt.Sprintf("\n%s%s%s", indent, " ", value)) - if i != lastIdx { - 
outputBuf.WriteString(",") - } - } - - outputBuf.WriteString(fmt.Sprintf("\n%s]", indent)) - return strings.TrimPrefix(outputBuf.String(), "\n") -} - -func formatListOutput(indent, outputName string, outputList []interface{}) string { - keyIndent := "" - - outputBuf := new(bytes.Buffer) - - if outputName != "" { - outputBuf.WriteString(fmt.Sprintf("%s%s = [", indent, outputName)) - keyIndent = " " - } - - lastIdx := len(outputList) - 1 - - for i, value := range outputList { - switch typedValue := value.(type) { - case string: - outputBuf.WriteString(fmt.Sprintf("\n%s%s%s", indent, keyIndent, value)) - case []interface{}: - outputBuf.WriteString(fmt.Sprintf("\n%s%s", indent, - formatNestedList(indent+keyIndent, typedValue))) - case map[string]interface{}: - outputBuf.WriteString(fmt.Sprintf("\n%s%s", indent, - formatNestedMap(indent+keyIndent, typedValue))) - } - - if lastIdx != i { - outputBuf.WriteString(",") - } - } - - if outputName != "" { - if len(outputList) > 0 { - outputBuf.WriteString(fmt.Sprintf("\n%s]", indent)) - } else { - outputBuf.WriteString("]") - } - } - - return strings.TrimPrefix(outputBuf.String(), "\n") -} - -func formatNestedMap(indent string, outputMap map[string]interface{}) string { - ks := make([]string, 0, len(outputMap)) - for k := range outputMap { - ks = append(ks, k) - } - sort.Strings(ks) - - outputBuf := new(bytes.Buffer) - outputBuf.WriteString(fmt.Sprintf("%s{", indent)) - - lastIdx := len(outputMap) - 1 - for i, k := range ks { - v := outputMap[k] - outputBuf.WriteString(fmt.Sprintf("\n%s%s = %v", indent+" ", k, v)) - - if lastIdx != i { - outputBuf.WriteString(",") - } - } - - outputBuf.WriteString(fmt.Sprintf("\n%s}", indent)) - - return strings.TrimPrefix(outputBuf.String(), "\n") -} - -func formatMapOutput(indent, outputName string, outputMap map[string]interface{}) string { - ks := make([]string, 0, len(outputMap)) - for k := range outputMap { - ks = append(ks, k) - } - sort.Strings(ks) - - keyIndent := "" - - outputBuf := 
new(bytes.Buffer) - if outputName != "" { - outputBuf.WriteString(fmt.Sprintf("%s%s = {", indent, outputName)) - keyIndent = " " - } - - for _, k := range ks { - v := outputMap[k] - outputBuf.WriteString(fmt.Sprintf("\n%s%s%s = %v", indent, keyIndent, k, v)) - } - - if outputName != "" { - if len(outputMap) > 0 { - outputBuf.WriteString(fmt.Sprintf("\n%s}", indent)) - } else { - outputBuf.WriteString("}") - } - } - - return strings.TrimPrefix(outputBuf.String(), "\n") -} diff --git a/command/format/state_test.go b/command/format/state_test.go index d7864ee16..2fd11ce26 100644 --- a/command/format/state_test.go +++ b/command/format/state_test.go @@ -9,15 +9,9 @@ import ( "github.com/hashicorp/terraform/providers" "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/colorstring" "github.com/zclconf/go-cty/cty" ) -var disabledColorize = &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Disable: true, -} - func TestState(t *testing.T) { tests := []struct { State *StateOpts @@ -92,43 +86,49 @@ func testProvider() *terraform.MockProvider { return providers.ReadResourceResponse{NewState: req.PriorState} } - p.GetSchemaReturn = testProviderSchema() + p.GetSchemaResponse = testProviderSchema() return p } -func testProviderSchema() *terraform.ProviderSchema { - return &terraform.ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "region": {Type: cty.String, Optional: true}, +func testProviderSchema() *providers.GetSchemaResponse { + return &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Optional: true}, + }, }, }, - ResourceTypes: map[string]*configschema.Block{ + ResourceTypes: map[string]providers.Schema{ "test_resource": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "foo": {Type: 
cty.String, Optional: true}, - "woozles": {Type: cty.String, Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "compute": {Type: cty.String, Optional: true}, - "value": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "woozles": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "compute": {Type: cty.String, Optional: true}, + "value": {Type: cty.String, Optional: true}, + }, }, }, }, }, }, }, - DataSources: map[string]*configschema.Block{ + DataSources: map[string]providers.Schema{ "test_data_source": { - Attributes: map[string]*configschema.Attribute{ - "compute": {Type: cty.String, Optional: true}, - "value": {Type: cty.String, Computed: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "compute": {Type: cty.String, Optional: true}, + "value": {Type: cty.String, Computed: true}, + }, }, }, }, @@ -139,7 +139,7 @@ func testSchemas() *terraform.Schemas { provider := testProvider() return &terraform.Schemas{ Providers: map[addrs.Provider]*terraform.ProviderSchema{ - addrs.NewDefaultProvider("test"): provider.GetSchemaReturn, + addrs.NewDefaultProvider("test"): provider.ProviderSchema(), }, } } @@ -219,7 +219,7 @@ map_var = { "first" = "foo" "second" = "bar" } -sensitive_var = "secret!!!" 
+sensitive_var = (sensitive value) string_var = "string value"` func basicState(t *testing.T) *states.State { diff --git a/command/format/trivia.go b/command/format/trivia.go new file mode 100644 index 000000000..b97d50b0e --- /dev/null +++ b/command/format/trivia.go @@ -0,0 +1,58 @@ +package format + +import ( + "strings" + + "github.com/mitchellh/colorstring" + wordwrap "github.com/mitchellh/go-wordwrap" +) + +// HorizontalRule returns a newline character followed by a number of +// horizontal line characters to fill the given width. +// +// If the given colorize has colors enabled, the rule will also be given a +// dark grey color to attempt to visually de-emphasize it for sighted users. +// +// This is intended for printing to the UI via mitchellh/cli.UI.Output, or +// similar, which will automatically append a trailing newline too. +func HorizontalRule(color *colorstring.Colorize, width int) string { + if width <= 1 { + return "\n" + } + rule := strings.Repeat("─", width-1) + if color == nil { // sometimes unit tests don't populate this properly + return "\n" + rule + } + return color.Color("[dark_gray]\n" + rule) +} + +// WordWrap takes a string containing unbroken lines of text and inserts +// newline characters to try to make the text fit within the given width. +// +// The string can already contain newline characters, for example if you are +// trying to render multiple paragraphs of text. (In that case, our usual +// style would be to have _two_ newline characters as the paragraph separator.) +// +// As a special case, any line that begins with at least one space will be left +// unbroken. This allows including literal segments in the output, such as +// code snippets or filenames, where word wrapping would be confusing. +func WordWrap(str string, width int) string { + if width <= 1 { + // Silly edge case. We'll just return the original string to avoid + // panicking or doing other weird stuff. 
+ return str + } + + var buf strings.Builder + lines := strings.Split(str, "\n") + for i, line := range lines { + if !strings.HasPrefix(line, " ") { + line = wordwrap.WrapString(line, uint(width-1)) + } + if i > 0 { + buf.WriteByte('\n') // reintroduce the newlines we skipped in Scan + } + buf.WriteString(line) + } + return buf.String() +} diff --git a/command/get_test.go b/command/get_test.go index 013b9779d..7d9137425 100644 --- a/command/get_test.go +++ b/command/get_test.go @@ -9,8 +9,10 @@ import ( ) func TestGet(t *testing.T) { - tmp, cwd := testCwd(t) - defer testFixCwd(t, tmp, cwd) + td := tempDir(t) + testCopyDir(t, testFixturePath("get"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() ui := new(cli.MockUi) c := &GetCommand{ @@ -21,9 +23,7 @@ func TestGet(t *testing.T) { }, } - args := []string{ - testFixturePath("get"), - } + args := []string{} if code := c.Run(args); code != 0 { t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) } @@ -53,7 +53,7 @@ func TestGet_multipleArgs(t *testing.T) { } } -func TestGet_noArgs(t *testing.T) { +func TestGet_update(t *testing.T) { td := tempDir(t) testCopyDir(t, testFixturePath("get"), td) defer os.RemoveAll(td) @@ -68,33 +68,8 @@ func TestGet_noArgs(t *testing.T) { }, } - args := []string{} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - output := ui.OutputWriter.String() - if !strings.Contains(output, "- foo in") { - t.Fatalf("doesn't look like get: %s", output) - } -} - -func TestGet_update(t *testing.T) { - tmp, cwd := testCwd(t) - defer testFixCwd(t, tmp, cwd) - - ui := new(cli.MockUi) - c := &GetCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - dataDir: tempDir(t), - }, - } - args := []string{ "-update", - testFixturePath("get"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) diff --git a/command/graph.go b/command/graph.go index fba33c6f8..dfc1a2f8e 100644 --- 
a/command/graph.go +++ b/command/graph.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/planfile" "github.com/hashicorp/terraform/tfdiags" "github.com/hashicorp/terraform/backend" @@ -23,6 +23,7 @@ func (c *GraphCommand) Run(args []string) int { var graphTypeStr string var moduleDepth int var verbose bool + var planPath string args = c.Meta.process(args) cmdFlags := c.Meta.defaultFlagSet("graph") @@ -30,6 +31,7 @@ func (c *GraphCommand) Run(args []string) int { cmdFlags.StringVar(&graphTypeStr, "type", "", "type") cmdFlags.IntVar(&moduleDepth, "module-depth", -1, "module-depth") cmdFlags.BoolVar(&verbose, "verbose", false, "verbose") + cmdFlags.StringVar(&planPath, "plan", "", "plan") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) @@ -48,16 +50,14 @@ func (c *GraphCommand) Run(args []string) int { return 1 } - // Check if the path is a plan - var plan *plans.Plan - planFile, err := c.PlanFile(configPath) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - if planFile != nil { - // Reset for backend loading - configPath = "" + // Try to load plan if path is specified + var planFile *planfile.Reader + if planPath != "" { + planFile, err = c.PlanFile(planPath) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } } var diags tfdiags.Diagnostics @@ -87,6 +87,9 @@ func (c *GraphCommand) Run(args []string) int { return 1 } + // This is a read-only command + c.ignoreRemoteBackendVersionConflict(b) + // Build the operation opReq := c.Operation(b) opReq.ConfigDir = configPath @@ -109,7 +112,7 @@ func (c *GraphCommand) Run(args []string) int { // Determine the graph type graphType := terraform.GraphTypePlan - if plan != nil { + if planFile != nil { graphType = terraform.GraphTypeApply } @@ -160,10 +163,10 @@ func (c *GraphCommand) Run(args []string) int { func (c 
*GraphCommand) Help() string { helpText := ` -Usage: terraform graph [options] [DIR] +Usage: terraform graph [options] Outputs the visual execution graph of Terraform resources according to - configuration files in DIR (or the current directory if omitted). + either the current configuration or an execution plan. The graph is outputted in DOT format. The typical program that can read this format is GraphViz, but many web services are also available @@ -177,6 +180,9 @@ Usage: terraform graph [options] [DIR] Options: + -plan=tfplan Render graph using the specified plan file instead of the + configuration in the current directory. + -draw-cycles Highlight any cycles in the graph with colored edges. This helps when diagnosing cycle errors. diff --git a/command/graph_test.go b/command/graph_test.go index dd9f4413c..392db1a9c 100644 --- a/command/graph_test.go +++ b/command/graph_test.go @@ -14,8 +14,10 @@ import ( ) func TestGraph(t *testing.T) { - tmp, cwd := testCwd(t) - defer testFixCwd(t, tmp, cwd) + td := tempDir(t) + testCopyDir(t, testFixturePath("graph"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() ui := new(cli.MockUi) c := &GraphCommand{ @@ -25,9 +27,7 @@ func TestGraph(t *testing.T) { }, } - args := []string{ - testFixturePath("graph"), - } + args := []string{} if code := c.Run(args); code != 0 { t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) } @@ -57,14 +57,10 @@ func TestGraph_multipleArgs(t *testing.T) { } func TestGraph_noArgs(t *testing.T) { - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("err: %s", err) - } - if err := os.Chdir(testFixturePath("graph")); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Chdir(cwd) + td := tempDir(t) + testCopyDir(t, testFixturePath("graph"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() ui := new(cli.MockUi) c := &GraphCommand{ @@ -154,7 +150,7 @@ func TestGraph_plan(t *testing.T) { } args := []string{ - planPath, + "-plan", planPath, } if code := c.Run(args); code != 0 { t.Fatalf("bad: 
\n%s", ui.ErrorWriter.String()) diff --git a/backend/local/hook_count.go b/command/hook_count.go similarity index 99% rename from backend/local/hook_count.go rename to command/hook_count.go index f9d619ef2..40a834cd0 100644 --- a/backend/local/hook_count.go +++ b/command/hook_count.go @@ -1,4 +1,4 @@ -package local +package command import ( "sync" diff --git a/backend/local/hook_count_test.go b/command/hook_count_test.go similarity index 73% rename from backend/local/hook_count_test.go rename to command/hook_count_test.go index 938e730df..ec7eba984 100644 --- a/backend/local/hook_count_test.go +++ b/command/hook_count_test.go @@ -1,4 +1,4 @@ -package local +package command import ( "reflect" @@ -10,6 +10,8 @@ import ( "github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/terraform" + + legacy "github.com/hashicorp/terraform/internal/legacy/terraform" ) func TestCountHook_impl(t *testing.T) { @@ -19,8 +21,8 @@ func TestCountHook_impl(t *testing.T) { func TestCountHookPostDiff_DestroyDeposed(t *testing.T) { h := new(CountHook) - resources := map[string]*terraform.InstanceDiff{ - "lorem": &terraform.InstanceDiff{DestroyDeposed: true}, + resources := map[string]*legacy.InstanceDiff{ + "lorem": &legacy.InstanceDiff{DestroyDeposed: true}, } for k := range resources { @@ -47,11 +49,11 @@ func TestCountHookPostDiff_DestroyDeposed(t *testing.T) { func TestCountHookPostDiff_DestroyOnly(t *testing.T) { h := new(CountHook) - resources := map[string]*terraform.InstanceDiff{ - "foo": &terraform.InstanceDiff{Destroy: true}, - "bar": &terraform.InstanceDiff{Destroy: true}, - "lorem": &terraform.InstanceDiff{Destroy: true}, - "ipsum": &terraform.InstanceDiff{Destroy: true}, + resources := map[string]*legacy.InstanceDiff{ + "foo": &legacy.InstanceDiff{Destroy: true}, + "bar": &legacy.InstanceDiff{Destroy: true}, + "lorem": &legacy.InstanceDiff{Destroy: true}, + "ipsum": &legacy.InstanceDiff{Destroy: true}, } for k := range 
resources { @@ -78,20 +80,20 @@ func TestCountHookPostDiff_DestroyOnly(t *testing.T) { func TestCountHookPostDiff_AddOnly(t *testing.T) { h := new(CountHook) - resources := map[string]*terraform.InstanceDiff{ - "foo": &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{RequiresNew: true}, + resources := map[string]*legacy.InstanceDiff{ + "foo": &legacy.InstanceDiff{ + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{RequiresNew: true}, }, }, - "bar": &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{RequiresNew: true}, + "bar": &legacy.InstanceDiff{ + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{RequiresNew: true}, }, }, - "lorem": &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{RequiresNew: true}, + "lorem": &legacy.InstanceDiff{ + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{RequiresNew: true}, }, }, } @@ -120,23 +122,23 @@ func TestCountHookPostDiff_AddOnly(t *testing.T) { func TestCountHookPostDiff_ChangeOnly(t *testing.T) { h := new(CountHook) - resources := map[string]*terraform.InstanceDiff{ - "foo": &terraform.InstanceDiff{ + resources := map[string]*legacy.InstanceDiff{ + "foo": &legacy.InstanceDiff{ Destroy: false, - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{}, + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{}, }, }, - "bar": &terraform.InstanceDiff{ + "bar": &legacy.InstanceDiff{ Destroy: false, - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{}, + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{}, }, }, - "lorem": &terraform.InstanceDiff{ + "lorem": &legacy.InstanceDiff{ Destroy: false, - 
Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{}, + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{}, }, }, } @@ -197,11 +199,11 @@ func TestCountHookPostDiff_Mixed(t *testing.T) { func TestCountHookPostDiff_NoChange(t *testing.T) { h := new(CountHook) - resources := map[string]*terraform.InstanceDiff{ - "foo": &terraform.InstanceDiff{}, - "bar": &terraform.InstanceDiff{}, - "lorem": &terraform.InstanceDiff{}, - "ipsum": &terraform.InstanceDiff{}, + resources := map[string]*legacy.InstanceDiff{ + "foo": &legacy.InstanceDiff{}, + "bar": &legacy.InstanceDiff{}, + "lorem": &legacy.InstanceDiff{}, + "ipsum": &legacy.InstanceDiff{}, } for k := range resources { @@ -261,23 +263,23 @@ func TestCountHookPostDiff_DataSource(t *testing.T) { func TestCountHookApply_ChangeOnly(t *testing.T) { h := new(CountHook) - resources := map[string]*terraform.InstanceDiff{ - "foo": &terraform.InstanceDiff{ + resources := map[string]*legacy.InstanceDiff{ + "foo": &legacy.InstanceDiff{ Destroy: false, - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{}, + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{}, }, }, - "bar": &terraform.InstanceDiff{ + "bar": &legacy.InstanceDiff{ Destroy: false, - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{}, + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{}, }, }, - "lorem": &terraform.InstanceDiff{ + "lorem": &legacy.InstanceDiff{ Destroy: false, - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{}, + Attributes: map[string]*legacy.ResourceAttrDiff{ + "foo": &legacy.ResourceAttrDiff{}, }, }, } @@ -306,11 +308,11 @@ func TestCountHookApply_ChangeOnly(t *testing.T) { func TestCountHookApply_DestroyOnly(t *testing.T) { h := new(CountHook) - resources := map[string]*terraform.InstanceDiff{ - 
"foo": &terraform.InstanceDiff{Destroy: true}, - "bar": &terraform.InstanceDiff{Destroy: true}, - "lorem": &terraform.InstanceDiff{Destroy: true}, - "ipsum": &terraform.InstanceDiff{Destroy: true}, + resources := map[string]*legacy.InstanceDiff{ + "foo": &legacy.InstanceDiff{Destroy: true}, + "bar": &legacy.InstanceDiff{Destroy: true}, + "lorem": &legacy.InstanceDiff{Destroy: true}, + "ipsum": &legacy.InstanceDiff{Destroy: true}, } for k := range resources { diff --git a/command/import.go b/command/import.go index cf22946c5..275349408 100644 --- a/command/import.go +++ b/command/import.go @@ -35,6 +35,7 @@ func (c *ImportCommand) Run(args []string) int { args = c.Meta.process(args) cmdFlags := c.Meta.extendedFlagSet("import") + cmdFlags.BoolVar(&c.ignoreRemoteVersion, "ignore-remote-version", false, "continue even if remote and local Terraform versions are incompatible") cmdFlags.IntVar(&c.Meta.parallelism, "parallelism", DefaultParallelism, "parallelism") cmdFlags.StringVar(&c.Meta.statePath, "state", "", "path") cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path") @@ -198,6 +199,14 @@ func (c *ImportCommand) Run(args []string) int { } } + // Check remote Terraform version is compatible + remoteVersionDiags := c.remoteBackendVersionCheck(b, opReq.Workspace) + diags = diags.Append(remoteVersionDiags) + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } + // Get the context ctx, state, ctxDiags := local.Context(opReq) diags = diags.Append(ctxDiags) @@ -321,6 +330,9 @@ Options: a file. If "terraform.tfvars" or any ".auto.tfvars" files are present, they will be automatically loaded. + -ignore-remote-version Continue even if remote and local Terraform versions + are incompatible. This may result in an unusable + workspace, and should be used with extreme caution. 
` return strings.TrimSpace(helpText) @@ -331,7 +343,7 @@ func (c *ImportCommand) Synopsis() string { } const importCommandInvalidAddressReference = `For information on valid syntax, see: -https://www.terraform.io/docs/internals/resource-addressing.html` +https://www.terraform.io/docs/cli/state/resource-addressing.html` const importCommandMissingResourceFmt = `[reset][bold][red]Error:[reset][bold] resource address %q does not exist in the configuration.[reset] diff --git a/command/import_test.go b/command/import_test.go index cb36589bb..354b20536 100644 --- a/command/import_test.go +++ b/command/import_test.go @@ -14,7 +14,6 @@ import ( "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/internal/copy" "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/tfdiags" ) @@ -33,7 +32,7 @@ func TestImport(t *testing.T) { } p.ImportResourceStateFn = nil - p.ImportResourceStateResponse = providers.ImportResourceStateResponse{ + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ ImportedResources: []providers.ImportedResource{ { TypeName: "test_instance", @@ -43,11 +42,13 @@ func TestImport(t *testing.T) { }, }, } - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, }, }, }, @@ -84,7 +85,7 @@ func TestImport_providerConfig(t *testing.T) { } p.ImportResourceStateFn = nil - p.ImportResourceStateResponse = providers.ImportResourceStateResponse{ + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ ImportedResources: 
[]providers.ImportedResource{ { TypeName: "test_instance", @@ -94,16 +95,20 @@ func TestImport_providerConfig(t *testing.T) { }, }, } - p.GetSchemaReturn = &terraform.ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + p.GetSchemaResponse = &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, - ResourceTypes: map[string]*configschema.Block{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, }, }, }, @@ -191,7 +196,7 @@ func TestImport_remoteState(t *testing.T) { } p.ImportResourceStateFn = nil - p.ImportResourceStateResponse = providers.ImportResourceStateResponse{ + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ ImportedResources: []providers.ImportedResource{ { TypeName: "test_instance", @@ -201,16 +206,20 @@ func TestImport_remoteState(t *testing.T) { }, }, } - p.GetSchemaReturn = &terraform.ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + p.GetSchemaResponse = &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, - ResourceTypes: map[string]*configschema.Block{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, + Block: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, }, }, }, @@ -338,7 +347,7 @@ func TestImport_providerConfigWithVar(t *testing.T) { } p.ImportResourceStateFn = nil - p.ImportResourceStateResponse = providers.ImportResourceStateResponse{ + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ ImportedResources: []providers.ImportedResource{ { TypeName: "test_instance", @@ -348,16 +357,20 @@ func TestImport_providerConfigWithVar(t *testing.T) { }, }, } - p.GetSchemaReturn = &terraform.ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + p.GetSchemaResponse = &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, - ResourceTypes: map[string]*configschema.Block{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, }, }, }, @@ -412,7 +425,7 @@ func TestImport_providerConfigWithDataSource(t *testing.T) { } p.ImportResourceStateFn = nil - p.ImportResourceStateResponse = providers.ImportResourceStateResponse{ + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ ImportedResources: []providers.ImportedResource{ { TypeName: "test_instance", @@ -422,23 +435,29 @@ func TestImport_providerConfigWithDataSource(t *testing.T) { }, }, } - p.GetSchemaReturn = &terraform.ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { + 
p.GetSchemaResponse = &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, + "foo": {Type: cty.String, Optional: true}, }, }, }, - DataSources: map[string]*configschema.Block{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, + }, + }, + }, + DataSources: map[string]providers.Schema{ "test_data": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -469,7 +488,7 @@ func TestImport_providerConfigWithVarDefault(t *testing.T) { } p.ImportResourceStateFn = nil - p.ImportResourceStateResponse = providers.ImportResourceStateResponse{ + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ ImportedResources: []providers.ImportedResource{ { TypeName: "test_instance", @@ -479,16 +498,20 @@ func TestImport_providerConfigWithVarDefault(t *testing.T) { }, }, } - p.GetSchemaReturn = &terraform.ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + p.GetSchemaResponse = &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, - ResourceTypes: map[string]*configschema.Block{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": 
{Type: cty.String, Optional: true, Computed: true}, + }, }, }, }, @@ -542,7 +565,7 @@ func TestImport_providerConfigWithVarFile(t *testing.T) { } p.ImportResourceStateFn = nil - p.ImportResourceStateResponse = providers.ImportResourceStateResponse{ + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ ImportedResources: []providers.ImportedResource{ { TypeName: "test_instance", @@ -552,16 +575,20 @@ func TestImport_providerConfigWithVarFile(t *testing.T) { }, }, } - p.GetSchemaReturn = &terraform.ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + p.GetSchemaResponse = &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, - ResourceTypes: map[string]*configschema.Block{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, }, }, }, @@ -616,7 +643,7 @@ func TestImport_allowMissingResourceConfig(t *testing.T) { } p.ImportResourceStateFn = nil - p.ImportResourceStateResponse = providers.ImportResourceStateResponse{ + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ ImportedResources: []providers.ImportedResource{ { TypeName: "test_instance", @@ -626,11 +653,13 @@ func TestImport_allowMissingResourceConfig(t *testing.T) { }, }, } - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, 
Computed: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, }, }, }, @@ -753,11 +782,13 @@ func TestImportModuleVarFile(t *testing.T) { statePath := testTempFile(t) p := testProvider() - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -823,11 +854,13 @@ func TestImportModuleInputVariableEvaluation(t *testing.T) { statePath := testTempFile(t) p := testProvider() - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -967,15 +1000,3 @@ test_instance.foo: ID = yay provider = provider["registry.terraform.io/hashicorp/test"] ` - -const testImportCustomProviderStr = ` -test_instance.foo: - ID = yay - provider = provider["registry.terraform.io/hashicorp/test"].alias -` - -const testImportProviderMismatchStr = ` -test_instance.foo: - ID = yay - provider = provider["registry.terraform.io/hashicorp/test-beta"] -` diff --git a/command/init.go b/command/init.go index e90449ca5..414d80d5a 100644 --- a/command/init.go +++ b/command/init.go @@ -4,8 +4,6 @@ import ( "context" "fmt" "log" - "os" - "path/filepath" "strings" "github.com/hashicorp/hcl/v2" @@ -31,16 +29,12 @@ import ( // module and 
clones it to the working directory. type InitCommand struct { Meta - - // getPlugins is for the -get-plugins flag - getPlugins bool } func (c *InitCommand) Run(args []string) int { var flagFromModule string var flagBackend, flagGet, flagUpgrade bool var flagPluginPath FlagStringSlice - var flagVerifyPlugins bool flagConfigExtra := newRawFlags("-backend-config") args = c.Meta.process(args) @@ -49,14 +43,10 @@ func (c *InitCommand) Run(args []string) int { cmdFlags.Var(flagConfigExtra, "backend-config", "") cmdFlags.StringVar(&flagFromModule, "from-module", "", "copy the source of the given module into the directory before init") cmdFlags.BoolVar(&flagGet, "get", true, "") - cmdFlags.BoolVar(&c.getPlugins, "get-plugins", true, "") cmdFlags.BoolVar(&c.forceInitCopy, "force-copy", false, "suppress prompts about copying state data") - cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") - cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") cmdFlags.BoolVar(&c.reconfigure, "reconfigure", false, "reconfigure") cmdFlags.BoolVar(&flagUpgrade, "upgrade", false, "") cmdFlags.Var(&flagPluginPath, "plugin-dir", "plugin directory") - cmdFlags.BoolVar(&flagVerifyPlugins, "verify-plugins", true, "verify plugins") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { return 1 @@ -66,14 +56,13 @@ func (c *InitCommand) Run(args []string) int { if len(flagPluginPath) > 0 { c.pluginPath = flagPluginPath - c.getPlugins = false } - // Validate the arg count + // Validate the arg count and get the working directory args = cmdFlags.Args() - if len(args) > 1 { - c.Ui.Error("The init command expects at most one argument.\n") - cmdFlags.Usage() + path, err := ModulePath(args) + if err != nil { + c.Ui.Error(err.Error()) return 1 } @@ -82,20 +71,6 @@ func (c *InitCommand) Run(args []string) int { return 1 } - // Get our pwd. 
We don't always need it but always getting it is easier - // than the logic to determine if it is or isn't needed. - pwd, err := os.Getwd() - if err != nil { - c.Ui.Error(fmt.Sprintf("Error getting pwd: %s", err)) - return 1 - } - - // If an argument is provided then it overrides our working directory. - path := pwd - if len(args) == 1 { - path = args[0] - } - // This will track whether we outputted anything so that we know whether // to output a newline before the success message var header bool @@ -133,7 +108,7 @@ func (c *InitCommand) Run(args []string) int { c.Ui.Output("") } - // If our directory is empty, then we're done. We can't get or setup + // If our directory is empty, then we're done. We can't get or set up // the backend with an empty directory. empty, err := configs.IsEmptyDir(path) if err != nil { @@ -264,6 +239,7 @@ func (c *InitCommand) Run(args []string) int { // on a previous run) we'll use the current state as a potential source // of provider dependencies. if back != nil { + c.ignoreRemoteBackendVersionConflict(back) workspace, err := c.Workspace() if err != nil { c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err)) @@ -321,9 +297,9 @@ func (c *InitCommand) getModules(path string, earlyRoot *tfconfig.Module, upgrad } if upgrade { - c.Ui.Output(c.Colorize().Color(fmt.Sprintf("[reset][bold]Upgrading modules..."))) + c.Ui.Output(c.Colorize().Color("[reset][bold]Upgrading modules...")) } else { - c.Ui.Output(c.Colorize().Color(fmt.Sprintf("[reset][bold]Initializing modules..."))) + c.Ui.Output(c.Colorize().Color("[reset][bold]Initializing modules...")) } hooks := uiModuleInstallHooks{ @@ -350,7 +326,7 @@ func (c *InitCommand) getModules(path string, earlyRoot *tfconfig.Module, upgrad } func (c *InitCommand) initBackend(root *configs.Module, extraConfig rawFlags) (be backend.Backend, output bool, diags tfdiags.Diagnostics) { - c.Ui.Output(c.Colorize().Color(fmt.Sprintf("\n[reset][bold]Initializing the backend..."))) + 
c.Ui.Output(c.Colorize().Color("\n[reset][bold]Initializing the backend...")) var backendConfig *configs.Backend var backendConfigOverride hcl.Body @@ -416,6 +392,12 @@ the backend configuration is present and valid. // Load the complete module tree, and fetch any missing providers. // This method outputs its own Ui. func (c *InitCommand) getProviders(config *configs.Config, state *states.State, upgrade bool, pluginDirs []string) (output, abort bool, diags tfdiags.Diagnostics) { + // Dev overrides cause the result of "terraform init" to be irrelevant for + // any overridden providers, so we'll warn about it to avoid later + // confusion when Terraform ends up using a different provider than the + // lock file called for. + diags = diags.Append(c.providerDevOverrideInitWarnings()) + // First we'll collect all the provider dependencies we can see in the // configuration and the state. reqs, hclDiags := config.ProviderRequirements() @@ -467,12 +449,15 @@ func (c *InitCommand) getProviders(config *configs.Config, state *states.State, log.Printf("[DEBUG] will search for provider plugins in %s", pluginDirs) } + // Installation can be aborted by interruption signals + ctx, done := c.InterruptibleContext() + defer done() + // Because we're currently just streaming a series of events sequentially // into the terminal, we're showing only a subset of the events to keep // things relatively concise. Later it'd be nice to have a progress UI // where statuses update in-place, but we can't do that as long as we // are shimming our vt100 output to the legacy console API on Windows. 
- missingProviders := make(map[addrs.Provider]struct{}) evts := &providercache.InstallerEvents{ PendingProviders: func(reqs map[addrs.Provider]getproviders.VersionConstraints) { c.Ui.Output(c.Colorize().Color( @@ -510,10 +495,6 @@ func (c *InitCommand) getProviders(config *configs.Config, state *states.State, c.Ui.Info(fmt.Sprintf("- Installing %s v%s...", provider.ForDisplay(), version)) }, QueryPackagesFailure: func(provider addrs.Provider, err error) { - // We track providers that had missing metadata because we might - // generate additional hints for some of them at the end. - missingProviders[provider] = struct{}{} - switch errorTy := err.(type) { case getproviders.ErrProviderNotFound: sources := errorTy.Sources @@ -529,11 +510,22 @@ func (c *InitCommand) getProviders(config *configs.Config, state *states.State, ), )) case getproviders.ErrRegistryProviderNotKnown: + // We might be able to suggest an alternative provider to use + // instead of this one. + var suggestion string + alternative := getproviders.MissingProviderSuggestion(ctx, provider, inst.ProviderSource()) + if alternative != provider { + suggestion = fmt.Sprintf( + "\n\nDid you intend to use %s? If so, you must specify that source address in each module which requires that provider. 
To see which modules are currently depending on %s, run the following command:\n terraform providers", + alternative.ForDisplay(), provider.ForDisplay(), + ) + } + diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, "Failed to query available provider packages", - fmt.Sprintf("Could not retrieve the list of available versions for provider %s: %s", - provider.ForDisplay(), err, + fmt.Sprintf("Could not retrieve the list of available versions for provider %s: %s%s", + provider.ForDisplay(), err, suggestion, ), )) case getproviders.ErrHostNoProviders: @@ -713,7 +705,7 @@ func (c *InitCommand) getProviders(config *configs.Config, state *states.State, if thirdPartySigned { c.Ui.Info(fmt.Sprintf("\nPartner and community providers are signed by their developers.\n" + "If you'd like to know more about provider signing, you can read about it here:\n" + - "https://www.terraform.io/docs/plugins/signing.html")) + "https://www.terraform.io/docs/cli/plugins/signing.html")) } }, HashPackageFailure: func(provider addrs.Provider, version getproviders.Version, err error) { @@ -729,81 +721,18 @@ func (c *InitCommand) getProviders(config *configs.Config, state *states.State, )) }, } - - // Dev overrides cause the result of "terraform init" to be irrelevant for - // any overridden providers, so we'll warn about it to avoid later - // confusion when Terraform ends up using a different provider than the - // lock file called for. 
- diags = diags.Append(c.providerDevOverrideWarnings()) + ctx = evts.OnContext(ctx) mode := providercache.InstallNewProvidersOnly if upgrade { mode = providercache.InstallUpgrades } - // Installation can be aborted by interruption signals - ctx, done := c.InterruptibleContext() - defer done() - ctx = evts.OnContext(ctx) newLocks, err := inst.EnsureProviderVersions(ctx, previousLocks, reqs, mode) if ctx.Err() == context.Canceled { c.showDiagnostics(diags) c.Ui.Error("Provider installation was canceled by an interrupt signal.") return true, true, diags } - if len(missingProviders) > 0 { - // If we encountered requirements for one or more providers where we - // weren't able to find any metadata, that _might_ be because a - // user had previously (before 0.14) been incorrectly using the - // .terraform/plugins directory as if it were a local filesystem - // mirror, rather than as the main cache directory. - // - // We no longer allow that because it'd be ambiguous whether plugins in - // there are explictly intended to be a local mirror or if they are - // just leftover cache entries from provider installation in - // Terraform 0.13. - // - // To help those users migrate we have a specialized warning message - // for it, which we'll produce only if one of the missing providers can - // be seen in the "legacy" cache directory, which is what we're now - // considering .terraform/plugins to be. (The _current_ cache directory - // is .terraform/providers.) - // - // This is only a heuristic, so it might potentially produce false - // positives if a user happens to encounter another sort of error - // while they are upgrading from Terraform 0.13 to 0.14. Aside from - // upgrading users should not end up in here because they won't - // have a legacy cache directory at all. 
- legacyDir := c.providerLegacyCacheDir() - if legacyDir != nil { // if the legacy directory is present at all - for missingProvider := range missingProviders { - if missingProvider.IsDefault() { - // If we get here for a default provider then it's more - // likely that something _else_ went wrong, like a network - // problem, so we'll skip the warning in this case to - // avoid potentially misleading the user into creating an - // unnecessary local mirror for an official provider. - continue - } - entry := legacyDir.ProviderLatestVersion(missingProvider) - if entry == nil { - continue - } - // If we get here then the missing provider was cached, which - // implies that it might be an in-house provider the user - // placed manually to try to make Terraform use it as if it - // were a local mirror directory. - wantDir := filepath.FromSlash(fmt.Sprintf("terraform.d/plugins/%s/%s/%s", missingProvider, entry.Version, getproviders.CurrentPlatform)) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "Missing provider is in legacy cache directory", - fmt.Sprintf( - "Terraform supports a number of local directories that can serve as automatic local filesystem mirrors, but .terraform/plugins is not one of them because Terraform v0.13 and earlier used this directory to cache copies of provider plugins retrieved from elsewhere.\n\nIf you intended to use this directory as a filesystem mirror for %s, place it instead in the following directory:\n %s", - missingProvider, wantDir, - ), - )) - } - } - } if err != nil { // The errors captured in "err" should be redundant with what we // received via the InstallerEvents callbacks above, so we'll @@ -970,21 +899,17 @@ func (c *InitCommand) AutocompleteFlags() complete.Flags { "-force-copy": complete.PredictNothing, "-from-module": completePredictModuleSource, "-get": completePredictBoolean, - "-get-plugins": completePredictBoolean, "-input": completePredictBoolean, - "-lock": completePredictBoolean, - "-lock-timeout": 
complete.PredictAnything, "-no-color": complete.PredictNothing, "-plugin-dir": complete.PredictDirs(""), "-reconfigure": complete.PredictNothing, "-upgrade": completePredictBoolean, - "-verify-plugins": completePredictBoolean, } } func (c *InitCommand) Help() string { helpText := ` -Usage: terraform init [options] [DIR] +Usage: terraform init [options] Initialize a new or existing Terraform working directory by creating initial files, loading any remote state, downloading modules, etc. @@ -999,9 +924,6 @@ Usage: terraform init [options] [DIR] state. Even so, if you have important information, please back it up prior to running this command, just in case. - If no arguments are given, the configuration in this working directory - is initialized. - Options: -backend=true Configure the backend for this configuration. @@ -1022,31 +944,22 @@ Options: -get=true Download any modules for this configuration. - -get-plugins=true Download any missing plugins for this configuration. - -input=true Ask for input if necessary. If false, will error if input was required. - -lock=true Lock the state file when locking is supported. - - -lock-timeout=0s Duration to retry a state lock. - -no-color If specified, output won't contain any color. -plugin-dir Directory containing plugin binaries. This overrides all - default search paths for plugins, and prevents the + default search paths for plugins, and prevents the automatic installation of plugins. This flag can be used multiple times. -reconfigure Reconfigure the backend, ignoring any saved configuration. - -upgrade=false If installing modules (-get) or plugins (-get-plugins), - ignore previously-downloaded objects and install the + -upgrade=false If installing modules (-get) or plugins, ignore + previously-downloaded objects and install the latest version allowed within configured constraints. - - -verify-plugins=true Verify the authenticity and integrity of automatically - downloaded plugins. 
` return strings.TrimSpace(helpText) } @@ -1091,15 +1004,6 @@ rerun this command to reinitialize your working directory. If you forget, other commands will detect it and remind you to do so if necessary. ` -const outputInitProvidersUnconstrained = ` -The following providers do not have any version constraints in configuration, -so the latest version was installed. - -To prevent automatic upgrades to new major versions that may contain breaking -changes, we recommend adding version constraints in a required_providers block -in your configuration, with the constraint strings suggested below. -` - // providerProtocolTooOld is a message sent to the CLI UI if the provider's // supported protocol versions are too old for the user's version of terraform, // but a newer version of the provider is compatible. diff --git a/command/init_test.go b/command/init_test.go index 96d853d36..c72be0456 100644 --- a/command/init_test.go +++ b/command/init_test.go @@ -17,7 +17,6 @@ import ( "github.com/mitchellh/cli" "github.com/zclconf/go-cty/cty" - version "github.com/hashicorp/go-version" "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/configs/configschema" @@ -25,7 +24,6 @@ import ( "github.com/hashicorp/terraform/internal/getproviders" "github.com/hashicorp/terraform/internal/providercache" "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/states/statefile" "github.com/hashicorp/terraform/states/statemgr" ) @@ -74,42 +72,6 @@ func TestInit_multipleArgs(t *testing.T) { } } -func TestInit_fromModule_explicitDest(t *testing.T) { - td := tempDir(t) - os.MkdirAll(td, 0755) - defer os.RemoveAll(td) - defer testChdir(t, td)() - - ui := new(cli.MockUi) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - }, - } - - if _, err := os.Stat(DefaultStateFilename); err == nil { - // This should never happen; it indicates a bug in another test - // is 
causing a terraform.tfstate to get left behind in our directory - // here, which can interfere with our init process in a way that - // isn't relevant to this test. - fullPath, _ := filepath.Abs(DefaultStateFilename) - t.Fatalf("some other test has left terraform.tfstate behind:\n%s", fullPath) - } - - args := []string{ - "-from-module=" + testFixturePath("init"), - td, - } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - if _, err := os.Stat(filepath.Join(td, "hello.tf")); err != nil { - t.Fatalf("err: %s", err) - } -} - func TestInit_fromModule_cwdDest(t *testing.T) { // Create a temporary working directory that is empty td := tempDir(t) @@ -163,6 +125,10 @@ func TestInit_fromModule_dstInSrc(t *testing.T) { t.Fatalf("err: %s", err) } + if err := os.Chdir("foo"); err != nil { + t.Fatalf("err: %s", err) + } + ui := new(cli.MockUi) c := &InitCommand{ Meta: Meta{ @@ -172,8 +138,7 @@ func TestInit_fromModule_dstInSrc(t *testing.T) { } args := []string{ - "-from-module=.", - "foo", + "-from-module=./..", } if code := c.Run(args); code != 0 { t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) @@ -214,7 +179,7 @@ func TestInit_get(t *testing.T) { func TestInit_getUpgradeModules(t *testing.T) { // Create a temporary working directory that is empty td := tempDir(t) - os.MkdirAll(td, 0755) + testCopyDir(t, testFixturePath("init-get"), td) defer os.RemoveAll(td) defer testChdir(t, td)() @@ -228,9 +193,7 @@ func TestInit_getUpgradeModules(t *testing.T) { args := []string{ "-get=true", - "-get-plugins=false", "-upgrade", - testFixturePath("init-get"), } if code := c.Run(args); code != 0 { t.Fatalf("command did not complete successfully:\n%s", ui.ErrorWriter.String()) @@ -487,7 +450,7 @@ func TestInit_backendConfigFilePowershellConfusion(t *testing.T) { } output := ui.ErrorWriter.String() - if got, want := output, `Module directory ./input.config does not exist`; !strings.Contains(got, want) { + if got, want := output, `Too many command 
line arguments`; !strings.Contains(got, want) { t.Fatalf("wrong output\ngot:\n%s\n\nwant: message containing %q", got, want) } } @@ -684,41 +647,6 @@ func TestInit_backendCli_no_config_block(t *testing.T) { } } -func TestInit_targetSubdir(t *testing.T) { - // Create a temporary working directory that is empty - td := tempDir(t) - os.MkdirAll(td, 0755) - defer os.RemoveAll(td) - defer testChdir(t, td)() - - // copy the source into a subdir - testCopyDir(t, testFixturePath("init-backend"), filepath.Join(td, "source")) - - ui := new(cli.MockUi) - c := &InitCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - }, - } - - args := []string{ - "source", - } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } - - if _, err := os.Stat(filepath.Join(td, DefaultDataDir, DefaultStateFilename)); err != nil { - t.Fatalf("err: %s", err) - } - - // a data directory should not have been added to out working dir - if _, err := os.Stat(filepath.Join(td, "source", DefaultDataDir)); !os.IsNotExist(err) { - t.Fatalf("err: %s", err) - } -} - func TestInit_backendReinitWithExtra(t *testing.T) { td := tempDir(t) testCopyDir(t, testFixturePath("init-backend-empty"), td) @@ -846,7 +774,7 @@ func TestInit_inputFalse(t *testing.T) { } args := []string{"-input=false", "-backend-config=path=foo"} - if code := c.Run([]string{"-input=false"}); code != 0 { + if code := c.Run(args); code != 0 { t.Fatalf("bad: \n%s", ui.ErrorWriter) } @@ -916,11 +844,11 @@ func TestInit_getProvider(t *testing.T) { ui := new(cli.MockUi) providerSource, close := newMockProviderSource(t, map[string][]string{ // looking for an exact version - "exact": []string{"1.2.3"}, + "exact": {"1.2.3"}, // config requires >= 2.3.3 - "greater-than": []string{"2.3.4", "2.3.3", "2.3.0"}, + "greater-than": {"2.3.4", "2.3.3", "2.3.0"}, // config specifies - "between": []string{"3.4.5", "2.3.4", "1.2.3"}, + "between": {"3.4.5", "2.3.4", "1.2.3"}, }) defer 
close() m := Meta{ @@ -965,12 +893,30 @@ func TestInit_getProvider(t *testing.T) { } defer f.Close() - s := &statefile.File{ - Lineage: "", - State: states.NewState(), - TerraformVersion: version.Must(version.NewVersion("100.1.0")), + // Construct a mock state file from the far future + type FutureState struct { + Version uint `json:"version"` + Lineage string `json:"lineage"` + TerraformVersion string `json:"terraform_version"` + Outputs map[string]interface{} `json:"outputs"` + Resources []map[string]interface{} `json:"resources"` + } + fs := &FutureState{ + Version: 999, + Lineage: "123-456-789", + TerraformVersion: "999.0.0", + Outputs: make(map[string]interface{}), + Resources: make([]map[string]interface{}, 0), + } + src, err := json.MarshalIndent(fs, "", " ") + if err != nil { + t.Fatalf("failed to marshal future state: %s", err) + } + src = append(src, '\n') + _, err = f.Write(src) + if err != nil { + t.Fatal(err) } - statefile.WriteForTest(s, f) ui := new(cli.MockUi) m.Ui = ui @@ -983,7 +929,7 @@ func TestInit_getProvider(t *testing.T) { } errMsg := ui.ErrorWriter.String() - if !strings.Contains(errMsg, "which is newer than current") { + if !strings.Contains(errMsg, "Unsupported state file format") { t.Fatal("unexpected error:", errMsg) } }) @@ -1000,10 +946,10 @@ func TestInit_getProviderSource(t *testing.T) { ui := new(cli.MockUi) providerSource, close := newMockProviderSource(t, map[string][]string{ // looking for an exact version - "acme/alpha": []string{"1.2.3"}, + "acme/alpha": {"1.2.3"}, // config doesn't specify versions for other providers - "registry.example.com/acme/beta": []string{"1.0.0"}, - "gamma": []string{"2.0.0"}, + "registry.example.com/acme/beta": {"1.0.0"}, + "gamma": {"2.0.0"}, }) defer close() m := Meta{ @@ -1038,85 +984,6 @@ func TestInit_getProviderSource(t *testing.T) { } } -func TestInit_getProviderInLegacyPluginCacheDir(t *testing.T) { - // Create a temporary working directory that is empty - td := tempDir(t) - testCopyDir(t, 
testFixturePath("init-legacy-provider-cache"), td) - defer os.RemoveAll(td) - defer testChdir(t, td)() - - // The test fixture has placeholder os_arch directories which we must - // now rename to match the current platform, or else the entries inside - // will be ignored. - platformStr := getproviders.CurrentPlatform.String() - if err := os.Rename( - ".terraform/plugins/example.com/test/b/1.1.0/os_arch", - ".terraform/plugins/example.com/test/b/1.1.0/"+platformStr, - ); err != nil { - t.Fatal(err) - } - if err := os.Rename( - ".terraform/plugins/registry.terraform.io/hashicorp/c/2.0.0/os_arch", - ".terraform/plugins/registry.terraform.io/hashicorp/c/2.0.0/"+platformStr, - ); err != nil { - t.Fatal(err) - } - - // An empty MultiSource serves as a way to make sure no providers are - // actually available for installation, which suits us here because - // we're testing an error case. - providerSource := getproviders.MultiSource{} - - ui := cli.NewMockUi() - m := Meta{ - Ui: ui, - ProviderSource: providerSource, - } - - c := &InitCommand{ - Meta: m, - } - - args := []string{ - "-backend=false", - } - if code := c.Run(args); code == 0 { - t.Fatalf("succeeded; want error\n%s", ui.OutputWriter.String()) - } - - // We remove all of the newlines so that we don't need to contend with - // the automatic word wrapping that our diagnostic printer does. 
- stderr := strings.Replace(ui.ErrorWriter.String(), "\n", " ", -1) - - if got, want := stderr, `example.com/test/a: no available releases match the given constraints`; !strings.Contains(got, want) { - t.Errorf("missing error about example.com/test/a\nwant substring: %s\n%s", want, got) - } - if got, want := stderr, `example.com/test/b: no available releases match the given constraints`; !strings.Contains(got, want) { - t.Errorf("missing error about example.com/test/b\nwant substring: %s\n%s", want, got) - } - if got, want := stderr, `hashicorp/c: no available releases match the given constraints`; !strings.Contains(got, want) { - t.Errorf("missing error about registry.terraform.io/hashicorp/c\nwant substring: %s\n%s", want, got) - } - - if got, want := stderr, `terraform.d/plugins/example.com/test/a`; strings.Contains(got, want) { - // We _don't_ expect to see a warning about the "a" provider, because - // there's no copy of that in the legacy plugin cache dir. - t.Errorf("unexpected suggested path for local example.com/test/a\ndon't want substring: %s\n%s", want, got) - } - if got, want := stderr, `terraform.d/plugins/example.com/test/b/1.1.0/`+platformStr; !strings.Contains(got, want) { - // ...but we should see a warning about the "b" provider, because - // there's an entry for that in the legacy cache dir. - t.Errorf("missing suggested path for local example.com/test/b 1.0.0 on %s\nwant substring: %s\n%s", platformStr, want, got) - } - if got, want := stderr, `terraform.d/plugins/registry.terraform.io/hashicorp/c`; strings.Contains(got, want) { - // We _don't_ expect to see a warning about the "a" provider, even - // though it's in the cache dir, because it's an official provider - // and so we assume it ended up there as a result of normal provider - // installation in Terraform 0.13. 
- t.Errorf("unexpected suggested path for local hashicorp/c\ndon't want substring: %s\n%s", want, got) - } -} - func TestInit_getProviderLegacyFromState(t *testing.T) { // Create a temporary working directory that is empty td := tempDir(t) @@ -1230,8 +1097,8 @@ func TestInit_getProviderDetectedLegacy(t *testing.T) { // unknown provider, and the registry source will allow us to look up the // appropriate namespace if possible. providerSource, psClose := newMockProviderSource(t, map[string][]string{ - "hashicorp/foo": []string{"1.2.3"}, - "terraform-providers/baz": []string{"2.3.4"}, // this will not be installed + "hashicorp/foo": {"1.2.3"}, + "terraform-providers/baz": {"2.3.4"}, // this will not be installed }) defer psClose() registrySource, rsClose := testRegistrySource(t) @@ -1287,15 +1154,14 @@ func TestInit_getProviderDetectedLegacy(t *testing.T) { func TestInit_providerSource(t *testing.T) { // Create a temporary working directory that is empty td := tempDir(t) - configDirName := "init-required-providers" - testCopyDir(t, testFixturePath(configDirName), filepath.Join(td, configDirName)) + testCopyDir(t, testFixturePath("init-required-providers"), td) defer os.RemoveAll(td) defer testChdir(t, td)() providerSource, close := newMockProviderSource(t, map[string][]string{ - "test": []string{"1.2.3", "1.2.4"}, - "test-beta": []string{"1.2.4"}, - "source": []string{"1.2.2", "1.2.3", "1.2.1"}, + "test": {"1.2.3", "1.2.4"}, + "test-beta": {"1.2.4"}, + "source": {"1.2.2", "1.2.3", "1.2.1"}, }) defer close() @@ -1310,7 +1176,7 @@ func TestInit_providerSource(t *testing.T) { Meta: m, } - args := []string{configDirName} + args := []string{} if code := c.Run(args); code != 0 { t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) @@ -1394,15 +1260,14 @@ func TestInit_cancel(t *testing.T) { // platforms) were sent to it, testing that it is interruptible. 
td := tempDir(t) - configDirName := "init-required-providers" - testCopyDir(t, testFixturePath(configDirName), filepath.Join(td, configDirName)) + testCopyDir(t, testFixturePath("init-required-providers"), td) defer os.RemoveAll(td) defer testChdir(t, td)() providerSource, closeSrc := newMockProviderSource(t, map[string][]string{ - "test": []string{"1.2.3", "1.2.4"}, - "test-beta": []string{"1.2.4"}, - "source": []string{"1.2.2", "1.2.3", "1.2.1"}, + "test": {"1.2.3", "1.2.4"}, + "test-beta": {"1.2.4"}, + "source": {"1.2.2", "1.2.3", "1.2.1"}, }) defer closeSrc() @@ -1423,7 +1288,7 @@ func TestInit_cancel(t *testing.T) { Meta: m, } - args := []string{configDirName} + args := []string{} if code := c.Run(args); code == 0 { t.Fatalf("succeeded; wanted error") @@ -1446,11 +1311,11 @@ func TestInit_getUpgradePlugins(t *testing.T) { providerSource, close := newMockProviderSource(t, map[string][]string{ // looking for an exact version - "exact": []string{"1.2.3"}, + "exact": {"1.2.3"}, // config requires >= 2.3.3 - "greater-than": []string{"2.3.4", "2.3.3", "2.3.0"}, + "greater-than": {"2.3.4", "2.3.3", "2.3.0"}, // config specifies > 1.0.0 , < 3.0.0 - "between": []string{"3.4.5", "2.3.4", "1.2.3"}, + "between": {"3.4.5", "2.3.4", "1.2.3"}, }) defer close() @@ -1462,8 +1327,8 @@ func TestInit_getUpgradePlugins(t *testing.T) { } installFakeProviderPackages(t, &m, map[string][]string{ - "exact": []string{"0.0.1"}, - "greater-than": []string{"2.3.3"}, + "exact": {"0.0.1"}, + "greater-than": {"2.3.3"}, }) c := &InitCommand{ @@ -1570,11 +1435,11 @@ func TestInit_getProviderMissing(t *testing.T) { providerSource, close := newMockProviderSource(t, map[string][]string{ // looking for exact version 1.2.3 - "exact": []string{"1.2.4"}, + "exact": {"1.2.4"}, // config requires >= 2.3.3 - "greater-than": []string{"2.3.4", "2.3.3", "2.3.0"}, + "greater-than": {"2.3.4", "2.3.3", "2.3.0"}, // config specifies - "between": []string{"3.4.5", "2.3.4", "1.2.3"}, + "between": {"3.4.5", 
"2.3.4", "1.2.3"}, }) defer close() @@ -1780,14 +1645,14 @@ func TestInit_pluginDirProviders(t *testing.T) { // for a moment that they are provider cache directories just because that // allows us to lean on our existing test helper functions to do this. for i, def := range [][]string{ - []string{"exact", "1.2.3"}, - []string{"greater-than", "2.3.4"}, - []string{"between", "2.3.4"}, + {"exact", "1.2.3"}, + {"greater-than", "2.3.4"}, + {"between", "2.3.4"}, } { name, version := def[0], def[1] dir := providercache.NewDir(pluginPath[i]) installFakeProviderPackagesElsewhere(t, dir, map[string][]string{ - name: []string{version}, + name: {version}, }) } @@ -1853,7 +1718,7 @@ func TestInit_pluginDirProvidersDoesNotGet(t *testing.T) { // but we should ignore it because -plugin-dir is set and thus this // source is temporarily overridden during install. providerSource, close := newMockProviderSource(t, map[string][]string{ - "between": []string{"2.3.4"}, + "between": {"2.3.4"}, }) defer close() @@ -1880,13 +1745,13 @@ func TestInit_pluginDirProvidersDoesNotGet(t *testing.T) { // for a moment that they are provider cache directories just because that // allows us to lean on our existing test helper functions to do this. 
for i, def := range [][]string{ - []string{"exact", "1.2.3"}, - []string{"greater-than", "2.3.4"}, + {"exact", "1.2.3"}, + {"greater-than", "2.3.4"}, } { name, version := def[0], def[1] dir := providercache.NewDir(pluginPath[i]) installFakeProviderPackagesElsewhere(t, dir, map[string][]string{ - name: []string{version}, + name: {version}, }) } diff --git a/command/internal_plugin.go b/command/internal_plugin.go deleted file mode 100644 index 33de8569a..000000000 --- a/command/internal_plugin.go +++ /dev/null @@ -1,97 +0,0 @@ -package command - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/plugin" - "github.com/kardianos/osext" -) - -// InternalPluginCommand is a Command implementation that allows plugins to be -// compiled into the main Terraform binary and executed via a subcommand. -type InternalPluginCommand struct { - Meta -} - -const TFSPACE = "-TFSPACE-" - -// BuildPluginCommandString builds a special string for executing internal -// plugins. It has the following format: -// -// /path/to/terraform-TFSPACE-internal-plugin-TFSPACE-terraform-provider-aws -// -// We split the string on -TFSPACE- to build the command executor. The reason we -// use -TFSPACE- is so we can support spaces in the /path/to/terraform part. -func BuildPluginCommandString(pluginType, pluginName string) (string, error) { - terraformPath, err := osext.Executable() - if err != nil { - return "", err - } - parts := []string{terraformPath, "internal-plugin", pluginType, pluginName} - return strings.Join(parts, TFSPACE), nil -} - -// Internal plugins do not support any CLI args, but we do receive flags that -// main.go:mergeEnvArgs has merged in from EnvCLI. Instead of making main.go -// aware of this exception, we strip all flags from our args. Flags are easily -// identified by the '-' prefix, ensured by the cli package used. 
-func StripArgFlags(args []string) []string { - argsNoFlags := []string{} - for i := range args { - if !strings.HasPrefix(args[i], "-") { - argsNoFlags = append(argsNoFlags, args[i]) - } - } - return argsNoFlags -} - -func (c *InternalPluginCommand) Run(args []string) int { - // strip flags from args, only use subcommands. - args = StripArgFlags(args) - - if len(args) != 2 { - log.Printf("Wrong number of args; expected: terraform internal-plugin pluginType pluginName") - return 1 - } - - pluginType := args[0] - pluginName := args[1] - - log.SetPrefix(fmt.Sprintf("%s-%s (internal) ", pluginName, pluginType)) - - switch pluginType { - case "provisioner": - pluginFunc, found := InternalProvisioners[pluginName] - if !found { - log.Printf("[ERROR] Could not load provisioner: %s", pluginName) - return 1 - } - log.Printf("[INFO] Starting provisioner plugin %s", pluginName) - plugin.Serve(&plugin.ServeOpts{ - ProvisionerFunc: pluginFunc, - }) - default: - log.Printf("[ERROR] Invalid plugin type %s", pluginType) - return 1 - } - - return 0 -} - -func (c *InternalPluginCommand) Help() string { - helpText := ` -Usage: terraform internal-plugin pluginType pluginName - - Runs an internally-compiled version of a plugin from the terraform binary. - - NOTE: this is an internal command and you should not call it yourself. -` - - return strings.TrimSpace(helpText) -} - -func (c *InternalPluginCommand) Synopsis() string { - return "internal plugin command" -} diff --git a/command/internal_plugin_list.go b/command/internal_plugin_list.go deleted file mode 100644 index e26c2c192..000000000 --- a/command/internal_plugin_list.go +++ /dev/null @@ -1,28 +0,0 @@ -// -// This file is automatically generated by scripts/generate-plugins.go -- Do not edit! 
-// -package command - -import ( - chefprovisioner "github.com/hashicorp/terraform/builtin/provisioners/chef" - fileprovisioner "github.com/hashicorp/terraform/builtin/provisioners/file" - habitatprovisioner "github.com/hashicorp/terraform/builtin/provisioners/habitat" - localexecprovisioner "github.com/hashicorp/terraform/builtin/provisioners/local-exec" - puppetprovisioner "github.com/hashicorp/terraform/builtin/provisioners/puppet" - remoteexecprovisioner "github.com/hashicorp/terraform/builtin/provisioners/remote-exec" - saltmasterlessprovisioner "github.com/hashicorp/terraform/builtin/provisioners/salt-masterless" - - "github.com/hashicorp/terraform/plugin" -) - -var InternalProviders = map[string]plugin.ProviderFunc{} - -var InternalProvisioners = map[string]plugin.ProvisionerFunc{ - "chef": chefprovisioner.Provisioner, - "file": fileprovisioner.Provisioner, - "habitat": habitatprovisioner.Provisioner, - "local-exec": localexecprovisioner.Provisioner, - "puppet": puppetprovisioner.Provisioner, - "remote-exec": remoteexecprovisioner.Provisioner, - "salt-masterless": saltmasterlessprovisioner.Provisioner, -} diff --git a/command/internal_plugin_test.go b/command/internal_plugin_test.go deleted file mode 100644 index bcfb97826..000000000 --- a/command/internal_plugin_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package command - -import ( - "testing" -) - -func TestInternalPlugin_InternalProviders(t *testing.T) { - m := new(Meta) - providers := m.internalProviders() - // terraform is the only provider moved back to internal - for _, name := range []string{"terraform"} { - pf, ok := providers[name] - if !ok { - t.Errorf("Expected to find %s in InternalProviders", name) - } - - provider, err := pf() - if err != nil { - t.Fatal(err) - } - - if provider == nil { - t.Fatal("provider factory returned a nil provider") - } - } -} - -func TestInternalPlugin_InternalProvisioners(t *testing.T) { - for _, name := range []string{"chef", "file", "local-exec", "remote-exec", 
"salt-masterless"} { - if _, ok := InternalProvisioners[name]; !ok { - t.Errorf("Expected to find %s in InternalProvisioners", name) - } - } -} - -func TestInternalPlugin_BuildPluginCommandString(t *testing.T) { - actual, err := BuildPluginCommandString("provisioner", "remote-exec") - if err != nil { - t.Fatalf(err.Error()) - } - - expected := "-TFSPACE-internal-plugin-TFSPACE-provisioner-TFSPACE-remote-exec" - if actual[len(actual)-len(expected):] != expected { - t.Errorf("Expected command to end with %s; got:\n%s\n", expected, actual) - } -} - -func TestInternalPlugin_StripArgFlags(t *testing.T) { - actual := StripArgFlags([]string{"provisioner", "remote-exec", "-var-file=my_vars.tfvars", "-flag"}) - expected := []string{"provisioner", "remote-exec"} - // Must be same length and order. - if len(actual) != len(expected) || expected[0] != actual[0] || actual[1] != actual[1] { - t.Fatalf("Expected args to be exactly '%s', got '%s'", expected, actual) - } -} diff --git a/command/jsonprovider/provider_test.go b/command/jsonprovider/provider_test.go index 8ae049f04..1d6bfd724 100644 --- a/command/jsonprovider/provider_test.go +++ b/command/jsonprovider/provider_test.go @@ -7,7 +7,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/terraform" ) @@ -130,14 +129,6 @@ func TestMarshalProvider(t *testing.T) { } } -func testProviders() *terraform.Schemas { - return &terraform.Schemas{ - Providers: map[addrs.Provider]*terraform.ProviderSchema{ - addrs.NewDefaultProvider("test"): testProvider(), - }, - } -} - func testProvider() *terraform.ProviderSchema { return &terraform.ProviderSchema{ Provider: &configschema.Block{ diff --git a/command/jsonstate/state.go b/command/jsonstate/state.go index 24eba59b5..bb5ba6a55 100644 --- a/command/jsonstate/state.go +++ b/command/jsonstate/state.go @@ -9,7 +9,6 @@ import ( ctyjson 
"github.com/zclconf/go-cty/cty/json" "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/states/statefile" "github.com/hashicorp/terraform/terraform" @@ -100,7 +99,10 @@ type resource struct { // resource, whose structure depends on the resource type schema. type attributeValues map[string]interface{} -func marshalAttributeValues(value cty.Value, schema *configschema.Block) attributeValues { +func marshalAttributeValues(value cty.Value) attributeValues { + // unmark our value to show all values + value, _ = value.UnmarkDeep() + if value == cty.NilVal || value.IsNull() { return nil } @@ -196,14 +198,30 @@ func marshalRootModule(s *states.State, schemas *terraform.Schemas) (module, err return ret, err } - // build a map of module -> [child module addresses] - moduleMap := make(map[string][]addrs.ModuleInstance) + // build a map of module -> set[child module addresses] + moduleChildSet := make(map[string]map[string]struct{}) for _, mod := range s.Modules { if mod.Addr.IsRoot() { continue } else { - parent := mod.Addr.Parent().String() - moduleMap[parent] = append(moduleMap[parent], mod.Addr) + for childAddr := mod.Addr; !childAddr.IsRoot(); childAddr = childAddr.Parent() { + if _, ok := moduleChildSet[childAddr.Parent().String()]; !ok { + moduleChildSet[childAddr.Parent().String()] = map[string]struct{}{} + } + moduleChildSet[childAddr.Parent().String()][childAddr.String()] = struct{}{} + } + } + } + + // transform the previous map into map of module -> [child module addresses] + moduleMap := make(map[string][]addrs.ModuleInstance) + for parent, children := range moduleChildSet { + for child := range children { + childModuleInstance, diags := addrs.ParseModuleInstanceStr(child) + if diags.HasErrors() { + return ret, diags.Err() + } + moduleMap[parent] = append(moduleMap[parent], childModuleInstance) } } @@ -222,14 +240,19 @@ func marshalModules( ) 
([]module, error) { var ret []module for _, child := range modules { - stateMod := s.Module(child) // cm for child module, naming things is hard. - cm := module{Address: stateMod.Addr.String()} - rs, err := marshalResources(stateMod.Resources, stateMod.Addr, schemas) - if err != nil { - return nil, err + cm := module{Address: child.String()} + + // the module may be resourceless and contain only submodules, it will then be nil here + stateMod := s.Module(child) + if stateMod != nil { + rs, err := marshalResources(stateMod.Resources, stateMod.Addr, schemas) + if err != nil { + return nil, err + } + cm.Resources = rs } - cm.Resources = rs + if moduleMap[child.String()] != nil { moreChildModules, err := marshalModules(s, schemas, moduleMap[child.String()], moduleMap) if err != nil { @@ -295,7 +318,7 @@ func marshalResources(resources map[string]*states.Resource, module addrs.Module return nil, err } - current.AttributeValues = marshalAttributeValues(riObj.Value, schema) + current.AttributeValues = marshalAttributeValues(riObj.Value) if len(riObj.Dependencies) > 0 { dependencies := make([]string, len(riObj.Dependencies)) @@ -327,7 +350,7 @@ func marshalResources(resources map[string]*states.Resource, module addrs.Module return nil, err } - deposed.AttributeValues = marshalAttributeValues(riObj.Value, schema) + deposed.AttributeValues = marshalAttributeValues(riObj.Value) if len(riObj.Dependencies) > 0 { dependencies := make([]string, len(riObj.Dependencies)) diff --git a/command/jsonstate/state_test.go b/command/jsonstate/state_test.go index 55d3c47f6..683d38378 100644 --- a/command/jsonstate/state_test.go +++ b/command/jsonstate/state_test.go @@ -75,60 +75,27 @@ func TestMarshalOutputs(t *testing.T) { func TestMarshalAttributeValues(t *testing.T) { tests := []struct { - Attr cty.Value - Schema *configschema.Block - Want attributeValues + Attr cty.Value + Want attributeValues }{ { cty.NilVal, - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - 
"foo": { - Type: cty.String, - Optional: true, - }, - }, - }, nil, }, { cty.NullVal(cty.String), - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Optional: true, - }, - }, - }, nil, }, { cty.ObjectVal(map[string]cty.Value{ "foo": cty.StringVal("bar"), }), - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Optional: true, - }, - }, - }, attributeValues{"foo": json.RawMessage(`"bar"`)}, }, { cty.ObjectVal(map[string]cty.Value{ "foo": cty.NullVal(cty.String), }), - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Optional: true, - }, - }, - }, attributeValues{"foo": json.RawMessage(`null`)}, }, { @@ -141,18 +108,22 @@ func TestMarshalAttributeValues(t *testing.T) { cty.StringVal("moon"), }), }), - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": { - Type: cty.Map(cty.String), - Required: true, - }, - "baz": { - Type: cty.List(cty.String), - Optional: true, - }, - }, + attributeValues{ + "bar": json.RawMessage(`{"hello":"world"}`), + "baz": json.RawMessage(`["goodnight","moon"]`), }, + }, + // Marked values + { + cty.ObjectVal(map[string]cty.Value{ + "bar": cty.MapVal(map[string]cty.Value{ + "hello": cty.StringVal("world"), + }), + "baz": cty.ListVal([]cty.Value{ + cty.StringVal("goodnight"), + cty.StringVal("moon").Mark("sensitive"), + }), + }), attributeValues{ "bar": json.RawMessage(`{"hello":"world"}`), "baz": json.RawMessage(`["goodnight","moon"]`), @@ -161,7 +132,7 @@ func TestMarshalAttributeValues(t *testing.T) { } for _, test := range tests { - got := marshalAttributeValues(test.Attr, test.Schema) + got := marshalAttributeValues(test.Attr) eq := reflect.DeepEqual(got, test.Want) if !eq { t.Fatalf("wrong result:\nGot: %#v\nWant: %#v\n", got, test.Want) @@ -583,6 +554,59 @@ func TestMarshalModules_nested(t *testing.T) { } } +func 
TestMarshalModules_parent_no_resources(t *testing.T) { + subModule, _ := addrs.ParseModuleInstanceStr("module.child.module.submodule") + testState := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(subModule), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: subModule.Module(), + }, + ) + }) + got, err := marshalRootModule(testState, testSchemas()) + + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + + if len(got.ChildModules) != 1 { + t.Fatalf("wrong result! got %d modules, expected 1", len(got.ChildModules)) + } + + if got.ChildModules[0].Address != "module.child" { + t.Fatalf("wrong result! got %#v\n", got) + } + + if got.ChildModules[0].ChildModules[0].Address != "module.child.module.submodule" { + t.Fatalf("wrong result! 
got %#v\n", got) + } +} + func testSchemas() *terraform.Schemas { return &terraform.Schemas{ Providers: map[addrs.Provider]*terraform.ProviderSchema{ diff --git a/command/meta.go b/command/meta.go index 2eb2ba895..4f01af630 100644 --- a/command/meta.go +++ b/command/meta.go @@ -15,22 +15,26 @@ import ( "time" plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" "github.com/hashicorp/terraform-svchost/disco" "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/backend/local" "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/command/views" "github.com/hashicorp/terraform/command/webbrowser" "github.com/hashicorp/terraform/configs/configload" - "github.com/hashicorp/terraform/helper/experiment" - "github.com/hashicorp/terraform/helper/wrappedstreams" "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/internal/terminal" "github.com/hashicorp/terraform/providers" "github.com/hashicorp/terraform/provisioners" "github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" "github.com/mitchellh/colorstring" + + legacy "github.com/hashicorp/terraform/internal/legacy/terraform" ) // Meta are the meta-options that are available on all or most commands. @@ -50,6 +54,17 @@ type Meta struct { // for some reason. OriginalWorkingDir string + // Streams tracks the raw Stdout, Stderr, and Stdin handles along with + // some basic metadata about them, such as whether each is connected to + // a terminal, how wide the possible terminal is, etc. + // + // For historical reasons this might not be set in unit test code, and + // so functions working with this field must check if it's nil and + // do some default behavior instead if so, rather than panicking. 
+ Streams *terminal.Streams + + View *views.View + Color bool // True if output should be colored GlobalPluginDirs []string // Additional paths to search for plugins Ui cli.Ui // Ui for output @@ -147,14 +162,15 @@ type Meta struct { configLoader *configload.Loader // backendState is the currently active backend state - backendState *terraform.BackendState + backendState *legacy.BackendState // Variables for the context (private) variableArgs rawFlags input bool // Targets for this context (private) - targets []addrs.Targetable + targets []addrs.Targetable + targetFlags []string // Internal fields color bool @@ -196,7 +212,6 @@ type Meta struct { stateOutPath string backupPath string parallelism int - provider string stateLock bool stateLockTimeout time.Duration forceInitCopy bool @@ -205,6 +220,10 @@ type Meta struct { // Used with the import command to allow import of state when no matching config exists. allowMissingConfig bool + + // Used with commands which write state to allow users to write remote + // state even if the remote and local Terraform versions don't match. + ignoreRemoteVersion bool } type testingOverrides struct { @@ -284,15 +303,42 @@ func (m *Meta) UIInput() terraform.UIInput { } } +// OutputColumns returns the number of columns that normal (non-error) UI +// output should be wrapped to fill. +// +// This is the column count to use if you'll be printing your message via +// the Output or Info methods of m.Ui. +func (m *Meta) OutputColumns() int { + if m.Streams == nil { + // A default for unit tests that don't populate Meta fully. + return 78 + } + return m.Streams.Stdout.Columns() +} + +// ErrorColumns returns the number of columns that error UI output should be +// wrapped to fill. +// +// This is the column count to use if you'll be printing your message via +// the Error or Warn methods of m.Ui. +func (m *Meta) ErrorColumns() int { + if m.Streams == nil { + // A default for unit tests that don't populate Meta fully. 
+ return 78 + } + return m.Streams.Stderr.Columns() +} + // StdinPiped returns true if the input is piped. func (m *Meta) StdinPiped() bool { - fi, err := wrappedstreams.Stdin().Stat() - if err != nil { - // If there is an error, let's just say its not piped + if m.Streams == nil { + // If we don't have m.Streams populated then we're presumably in a unit + // test that doesn't properly populate Meta, so we'll just say the + // output _isn't_ piped because that's the common case and so most likely + // to be useful to a unit test. return false } - - return fi.Mode()&os.ModeNamedPipe != 0 + return !m.Streams.Stdin.IsTerminal() } // InterruptibleContext returns a context.Context that will be cancelled @@ -421,6 +467,28 @@ func (m *Meta) contextOpts() (*terraform.ContextOpts, error) { } opts.Providers = providerFactories opts.Provisioners = m.provisionerFactories() + + // Read the dependency locks so that they can be verified against the + // provider requirements in the configuration + lockedDependencies, diags := m.lockedDependencies() + + // If the locks file is invalid, we should fail early rather than + // ignore it. A missing locks file will return no error. + if diags.HasErrors() { + return nil, diags.Err() + } + opts.LockedDependencies = lockedDependencies + + // If any unmanaged providers or dev overrides are enabled, they must + // be listed in the context so that they can be ignored when verifying + // the locks against the configuration + opts.ProvidersInDevelopment = make(map[addrs.Provider]struct{}) + for provider := range m.UnmanagedProviders { + opts.ProvidersInDevelopment[provider] = struct{}{} + } + for provider := range m.ProviderDevOverrides { + opts.ProvidersInDevelopment[provider] = struct{}{} + } } opts.ProviderSHA256s = m.providerPluginsLock().Read() @@ -434,6 +502,7 @@ func (m *Meta) contextOpts() (*terraform.ContextOpts, error) { } // defaultFlagSet creates a default flag set for commands. 
+// See also command/arguments/default.go func (m *Meta) defaultFlagSet(n string) *flag.FlagSet { f := flag.NewFlagSet(n, flag.ContinueOnError) f.SetOutput(ioutil.Discard) @@ -444,13 +513,24 @@ func (m *Meta) defaultFlagSet(n string) *flag.FlagSet { return f } +// ignoreRemoteVersionFlagSet add the ignore-remote version flag to suppress +// the error when the configured Terraform version on the remote workspace +// does not match the local Terraform version. +func (m *Meta) ignoreRemoteVersionFlagSet(n string) *flag.FlagSet { + f := m.defaultFlagSet(n) + + f.BoolVar(&m.ignoreRemoteVersion, "ignore-remote-version", false, "continue even if remote and local Terraform versions are incompatible") + + return f +} + // extendedFlagSet adds custom flags that are mostly used by commands // that are used to run an operation like plan or apply. func (m *Meta) extendedFlagSet(n string) *flag.FlagSet { f := m.defaultFlagSet(n) f.BoolVar(&m.input, "input", true, "input") - f.Var((*FlagTargetSlice)(&m.targets), "target", "resource to target") + f.Var((*FlagStringSlice)(&m.targetFlags), "target", "resource to target") f.BoolVar(&m.compactWarnings, "compact-warnings", false, "use compact warnings") if m.variableArgs.items == nil { @@ -461,9 +541,6 @@ func (m *Meta) extendedFlagSet(n string) *flag.FlagSet { f.Var(varValues, "var", "variables") f.Var(varFiles, "var-file", "variable file") - // Experimental features - experiment.Flag(f) - // commands that bypass locking will supply their own flag on this var, // but set the initial meta value to true as a failsafe. m.stateLock = true @@ -471,9 +548,46 @@ func (m *Meta) extendedFlagSet(n string) *flag.FlagSet { return f } -// process will process the meta-parameters out of the arguments. This +// parseTargetFlags must be called for any commands supporting -target +// arguments. This method attempts to parse each -target flag into an +// addrs.Target, storing in the Meta.targets slice. 
+// +// If any flags cannot be parsed, we rewrap the first error diagnostic with a +// custom title to clarify the source of the error. The normal approach of +// directly returning the diags from HCL or the addrs package results in +// confusing incorrect "source" results when presented. +func (m *Meta) parseTargetFlags() tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + m.targets = nil + for _, tf := range m.targetFlags { + traversal, syntaxDiags := hclsyntax.ParseTraversalAbs([]byte(tf), "", hcl.Pos{Line: 1, Column: 1}) + if syntaxDiags.HasErrors() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Invalid target %q", tf), + syntaxDiags[0].Detail, + )) + continue + } + + target, targetDiags := addrs.ParseTarget(traversal) + if targetDiags.HasErrors() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Invalid target %q", tf), + targetDiags[0].Description().Detail, + )) + continue + } + + m.targets = append(m.targets, target.Subject) + } + return diags +} + +// process will process any -no-color entries out of the arguments. This // will potentially modify the args in-place. It will return the resulting -// slice. +// slice, and update the Meta and Ui. func (m *Meta) process(args []string) []string { // We do this so that we retain the ability to technically call // process multiple times, even if we have no plans to do so @@ -559,6 +673,8 @@ func (m *Meta) showDiagnostics(vals ...interface{}) { return } + outputWidth := m.ErrorColumns() + diags = diags.ConsolidateWarnings(1) // Since warning messages are generally competing @@ -584,11 +700,13 @@ func (m *Meta) showDiagnostics(vals ...interface{}) { } for _, diag := range diags { - // TODO: Actually measure the terminal width and pass it here. - // For now, we don't have easy access to the writer that - // ui.Error (etc) are writing to and thus can't interrogate - // to see if it's a terminal and what size it is. 
- msg := format.Diagnostic(diag, m.configSources(), m.Colorize(), 78) + var msg string + if m.Color { + msg = format.Diagnostic(diag, m.configSources(), m.Colorize(), outputWidth) + } else { + msg = format.DiagnosticPlain(diag, m.configSources(), outputWidth) + } + switch diag.Severity() { case tfdiags.Error: m.Ui.Error(msg) @@ -600,49 +718,6 @@ func (m *Meta) showDiagnostics(vals ...interface{}) { } } -// outputShadowError outputs the error from ctx.ShadowError. If the -// error is nil then nothing happens. If output is false then it isn't -// outputted to the user (you can define logic to guard against outputting). -func (m *Meta) outputShadowError(err error, output bool) bool { - // Do nothing if no error - if err == nil { - return false - } - - // If not outputting, do nothing - if !output { - return false - } - - // Write the shadow error output to a file - path := fmt.Sprintf("terraform-error-%d.log", time.Now().UTC().Unix()) - if err := ioutil.WriteFile(path, []byte(err.Error()), 0644); err != nil { - // If there is an error writing it, just let it go - log.Printf("[ERROR] Error writing shadow error: %s", err) - return false - } - - // Output! - m.Ui.Output(m.Colorize().Color(fmt.Sprintf( - "[reset][bold][yellow]\nExperimental feature failure! Please report a bug.\n\n"+ - "This is not an error. Your Terraform operation completed successfully.\n"+ - "Your real infrastructure is unaffected by this message.\n\n"+ - "[reset][yellow]While running, Terraform sometimes tests experimental features in the\n"+ - "background. These features cannot affect real state and never touch\n"+ - "real infrastructure. If the features work properly, you see nothing.\n"+ - "If the features fail, this message appears.\n\n"+ - "You can report an issue at: https://github.com/hashicorp/terraform/issues\n\n"+ - "The failure was written to %q. Please\n"+ - "double check this file contains no sensitive information and report\n"+ - "it with your issue.\n\n"+ - "This is not an error. 
Your terraform operation completed successfully\n"+ - "and your real infrastructure is unaffected by this message.", - path, - ))) - - return true -} - // WorkspaceNameEnvVar is the name of the environment variable that can be used // to set the name of the Terraform workspace, overriding the workspace chosen // by `terraform workspace select`. @@ -651,14 +726,14 @@ func (m *Meta) outputShadowError(err error, output bool) bool { // and `terraform workspace delete`. const WorkspaceNameEnvVar = "TF_WORKSPACE" -var invalidWorkspaceNameEnvVar = fmt.Errorf("Invalid workspace name set using %s", WorkspaceNameEnvVar) +var errInvalidWorkspaceNameEnvVar = fmt.Errorf("Invalid workspace name set using %s", WorkspaceNameEnvVar) // Workspace returns the name of the currently configured workspace, corresponding // to the desired named state. func (m *Meta) Workspace() (string, error) { current, overridden := m.WorkspaceOverridden() if overridden && !validWorkspaceName(current) { - return "", invalidWorkspaceNameEnvVar + return "", errInvalidWorkspaceNameEnvVar } return current, nil } diff --git a/command/meta_backend.go b/command/meta_backend.go index 61c645aaf..f7f2cba84 100644 --- a/command/meta_backend.go +++ b/command/meta_backend.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hcldec" "github.com/hashicorp/terraform/backend" + remoteBackend "github.com/hashicorp/terraform/backend/remote" "github.com/hashicorp/terraform/command/clistate" "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/plans" @@ -28,6 +29,7 @@ import ( backendInit "github.com/hashicorp/terraform/backend/init" backendLocal "github.com/hashicorp/terraform/backend/local" + legacy "github.com/hashicorp/terraform/internal/legacy/terraform" ) // BackendOpts are the options used to initialize a backend.Backend. 
@@ -100,7 +102,7 @@ func (m *Meta) Backend(opts *BackendOpts) (backend.Enhanced, tfdiags.Diagnostics log.Printf("[TRACE] Meta.Backend: instantiated backend of type %T", b) } - // Setup the CLI opts we pass into backends that support it. + // Set up the CLI opts we pass into backends that support it. cliOpts, err := m.backendCLIOpts() if err != nil { diags = diags.Append(err) @@ -159,7 +161,7 @@ func (m *Meta) Backend(opts *BackendOpts) (backend.Enhanced, tfdiags.Diagnostics // with inside backendFromConfig, because we still need that codepath // to be able to recognize the lack of a config as distinct from // explicitly setting local until we do some more refactoring here. - m.backendState = &terraform.BackendState{ + m.backendState = &legacy.BackendState{ Type: "local", ConfigRaw: json.RawMessage("{}"), } @@ -306,6 +308,7 @@ func (m *Meta) backendCLIOpts() (*backend.CLIOpts, error) { return &backend.CLIOpts{ CLI: m.Ui, CLIColor: m.Colorize(), + Streams: m.Streams, ShowDiagnostics: m.showDiagnostics, StatePath: m.statePath, StateOutPath: m.stateOutPath, @@ -460,7 +463,7 @@ func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, tfdiags.Di s := sMgr.State() if s == nil { log.Printf("[TRACE] Meta.Backend: backend has not previously been initialized in this working directory") - s = terraform.NewState() + s = legacy.NewState() } else if s.Backend != nil { log.Printf("[TRACE] Meta.Backend: working directory was previously initialized for %q backend", s.Backend.Type) } else { @@ -817,9 +820,9 @@ func (m *Meta) backend_C_r_s(c *configs.Backend, cHash int, sMgr *clistate.Local // Store the metadata in our saved state location s := sMgr.State() if s == nil { - s = terraform.NewState() + s = legacy.NewState() } - s.Backend = &terraform.BackendState{ + s.Backend = &legacy.BackendState{ Type: c.Type, ConfigRaw: json.RawMessage(configJSON), Hash: uint64(cHash), @@ -901,9 +904,9 @@ func (m *Meta) backend_C_r_S_changed(c *configs.Backend, cHash int, sMgr *clista // 
Update the backend state s = sMgr.State() if s == nil { - s = terraform.NewState() + s = legacy.NewState() } - s.Backend = &terraform.BackendState{ + s.Backend = &legacy.BackendState{ Type: c.Type, ConfigRaw: json.RawMessage(configJSON), Hash: uint64(cHash), @@ -995,7 +998,7 @@ func (m *Meta) backend_C_r_S_unchanged(c *configs.Backend, cHash int, sMgr *clis // this function will conservatively assume that migration is required, // expecting that the migration code will subsequently deal with the same // errors. -func (m *Meta) backendConfigNeedsMigration(c *configs.Backend, s *terraform.BackendState) bool { +func (m *Meta) backendConfigNeedsMigration(c *configs.Backend, s *legacy.BackendState) bool { if s == nil || s.Empty() { log.Print("[TRACE] backendConfigNeedsMigration: no cached config, so migration is required") return true @@ -1091,6 +1094,38 @@ func (m *Meta) backendInitRequired(reason string) { "[reset]"+strings.TrimSpace(errBackendInit)+"\n", reason))) } +// Helper method to ignore remote backend version conflicts. Only call this +// for commands which cannot accidentally upgrade remote state files. +func (m *Meta) ignoreRemoteBackendVersionConflict(b backend.Backend) { + if rb, ok := b.(*remoteBackend.Remote); ok { + rb.IgnoreVersionConflict() + } +} + +// Helper method to check the local Terraform version against the configured +// version in the remote workspace, returning diagnostics if they conflict. 
+func (m *Meta) remoteBackendVersionCheck(b backend.Backend, workspace string) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if rb, ok := b.(*remoteBackend.Remote); ok { + // Allow user override based on command-line flag + if m.ignoreRemoteVersion { + rb.IgnoreVersionConflict() + } + // If the override is set, this check will return a warning instead of + // an error + versionDiags := rb.VerifyWorkspaceTerraformVersion(workspace) + diags = diags.Append(versionDiags) + // If there are no errors resulting from this check, we do not need to + // check again + if !diags.HasErrors() { + rb.IgnoreVersionConflict() + } + } + + return diags +} + //------------------------------------------------------------------- // Output constants and initialization code //------------------------------------------------------------------- @@ -1171,7 +1206,7 @@ Terraform configuration you're using is using a custom configuration for the Terraform backend. Changes to backend configurations require reinitialization. This allows -Terraform to setup the new configuration, copy existing state, etc. This is +Terraform to set up the new configuration, copy existing state, etc. This is only done during "terraform init". Please run that command now then try again. If the change reason above is incorrect, please verify your configuration @@ -1211,12 +1246,3 @@ const successBackendSet = ` Successfully configured the backend %q! Terraform will automatically use this backend unless the backend configuration changes. ` - -const errBackendLegacy = ` -This working directory is configured to use the legacy remote state features -from Terraform 0.8 or earlier. Remote state changed significantly in Terraform -0.9 and the automatic upgrade mechanism has now been removed. 
- -To upgrade, please first use Terraform v0.11 to complete the upgrade steps: - https://www.terraform.io/docs/backends/legacy-0-8.html -` diff --git a/command/meta_backend_migrate.go b/command/meta_backend_migrate.go index 6283bd99f..35fda9042 100644 --- a/command/meta_backend_migrate.go +++ b/command/meta_backend_migrate.go @@ -65,11 +65,24 @@ func (m *Meta) backendMigrateState(opts *backendMigrateOpts) error { errMigrateLoadStates), opts.TwoType, err) } - // Setup defaults + // Set up defaults opts.oneEnv = backend.DefaultStateName opts.twoEnv = backend.DefaultStateName opts.force = m.forceInitCopy + // Disregard remote Terraform version for the state source backend. If it's a + // Terraform Cloud remote backend, we don't care about the remote version, + // as we are migrating away and will not break a remote workspace. + m.ignoreRemoteBackendVersionConflict(opts.One) + + // Check the remote Terraform version for the state destination backend. If + // it's a Terraform Cloud remote backend, we want to ensure that we don't + // break the workspace by uploading an incompatible state file. + diags := m.remoteBackendVersionCheck(opts.Two, opts.twoEnv) + if diags.HasErrors() { + return diags.Err() + } + // Determine migration behavior based on whether the source/destination // supports multi-state. 
switch { diff --git a/command/meta_backend_test.go b/command/meta_backend_test.go index 07d268818..39b20a0a1 100644 --- a/command/meta_backend_test.go +++ b/command/meta_backend_test.go @@ -1541,7 +1541,7 @@ func TestMetaBackend_planLocalStatePath(t *testing.T) { defer testChdir(t, td)() original := testState() - mark := markStateForMatching(original, "hello") + markStateForMatching(original, "hello") backendConfigBlock := cty.ObjectVal(map[string]cty.Value{ "path": cty.NullVal(cty.String), @@ -1607,7 +1607,7 @@ func TestMetaBackend_planLocalStatePath(t *testing.T) { // Write some state state = states.NewState() - mark = markStateForMatching(state, "changing") + mark := markStateForMatching(state, "changing") s.WriteState(state) if err := s.PersistState(); err != nil { diff --git a/command/meta_config.go b/command/meta_config.go index d5f65355e..51ca27447 100644 --- a/command/meta_config.go +++ b/command/meta_config.go @@ -329,6 +329,9 @@ func (m *Meta) initConfigLoader() (*configload.Loader, error) { return nil, err } m.configLoader = loader + if m.View != nil { + m.View.SetConfigSources(loader.Sources) + } } return m.configLoader, nil } diff --git a/command/meta_providers.go b/command/meta_providers.go index c20c79f2c..3d09a9975 100644 --- a/command/meta_providers.go +++ b/command/meta_providers.go @@ -128,31 +128,6 @@ func (m *Meta) providerGlobalCacheDir() *providercache.Dir { return providercache.NewDir(dir) } -// providerLegacyCacheDir returns an object representing the former location -// of the local cache directory from Terraform 0.13 and earlier. -// -// This is no longer viable for use as a real cache directory because some -// incorrect documentation called for Terraform Cloud users to use it as if it -// were an implied local filesystem mirror directory. Therefore we now use it -// only to generate some hopefully-helpful migration guidance during -// "terraform init" for anyone who _was_ trying to use it as a local filesystem -// mirror directory. 
-// -// providerLegacyCacheDir returns nil if the legacy cache directory isn't -// present or isn't a directory, so that callers can more easily skip over -// any backward compatibility behavior that applies only when the directory -// is present. -// -// Callers must use the resulting object in a read-only mode only. Don't -// install any new providers into this directory. -func (m *Meta) providerLegacyCacheDir() *providercache.Dir { - dir := filepath.Join(m.DataDir(), "plugins") - if info, err := os.Stat(dir); err != nil || !info.IsDir() { - return nil - } - return providercache.NewDir(dir) -} - // providerInstallSource returns an object that knows how to consult one or // more external sources to determine the availability of and package // locations for versions of Terraform providers that are available for @@ -177,8 +152,35 @@ func (m *Meta) providerInstallSource() getproviders.Source { return m.ProviderSource } -// providerDevOverrideWarnings returns a diagnostics that contains at least -// one warning if and only if there is at least one provider development +// providerDevOverrideInitWarnings returns a diagnostics that contains at +// least one warning if and only if there is at least one provider development +// override in effect. If not, the result is always empty. The result never +// contains error diagnostics. +// +// The init command can use this to include a warning that the results +// may differ from what's expected due to the development overrides. For +// other commands, providerDevOverrideRuntimeWarnings should be used. 
+func (m *Meta) providerDevOverrideInitWarnings() tfdiags.Diagnostics { + if len(m.ProviderDevOverrides) == 0 { + return nil + } + var detailMsg strings.Builder + detailMsg.WriteString("The following provider development overrides are set in the CLI configuration:\n") + for addr, path := range m.ProviderDevOverrides { + detailMsg.WriteString(fmt.Sprintf(" - %s in %s\n", addr.ForDisplay(), path)) + } + detailMsg.WriteString("\nSkip terraform init when using provider development overrides. It is not necessary and may error unexpectedly.") + return tfdiags.Diagnostics{ + tfdiags.Sourceless( + tfdiags.Warning, + "Provider development overrides are in effect", + detailMsg.String(), + ), + } +} + +// providerDevOverrideRuntimeWarnings returns a diagnostics that contains at +// least one warning if and only if there is at least one provider development // override in effect. If not, the result is always empty. The result never // contains error diagnostics. // @@ -187,7 +189,10 @@ func (m *Meta) providerInstallSource() getproviders.Source { // not necessary to bother the user with this warning on every command, but // it's helpful to return it on commands that have externally-visible side // effects and on commands that are used to verify conformance to schemas. -func (m *Meta) providerDevOverrideWarnings() tfdiags.Diagnostics { +// +// See providerDevOverrideInitWarnings for warnings specific to the init +// command. 
+func (m *Meta) providerDevOverrideRuntimeWarnings() tfdiags.Diagnostics { if len(m.ProviderDevOverrides) == 0 { return nil } diff --git a/command/meta_test.go b/command/meta_test.go index 0de9cf79f..fe212d532 100644 --- a/command/meta_test.go +++ b/command/meta_test.go @@ -236,7 +236,7 @@ func TestMeta_Workspace_override(t *testing.T) { }, "invalid name": { "", - invalidWorkspaceNameEnvVar, + errInvalidWorkspaceNameEnvVar, }, } diff --git a/command/output.go b/command/output.go index 3f5cc2bea..52e4b942a 100644 --- a/command/output.go +++ b/command/output.go @@ -1,14 +1,11 @@ package command import ( - "encoding/json" "fmt" "strings" - ctyjson "github.com/zclconf/go-cty/cty/json" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/repl" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/views" "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/tfdiags" ) @@ -19,81 +16,75 @@ type OutputCommand struct { Meta } -func (c *OutputCommand) Run(args []string) int { - args = c.Meta.process(args) - var module, statePath string - var jsonOutput bool - cmdFlags := c.Meta.defaultFlagSet("output") - cmdFlags.BoolVar(&jsonOutput, "json", false, "json") - cmdFlags.StringVar(&statePath, "state", "", "path") - cmdFlags.StringVar(&module, "module", "", "module") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) +func (c *OutputCommand) Run(rawArgs []string) int { + // Parse and apply global view arguments + common, rawArgs := arguments.ParseView(rawArgs) + c.View.Configure(common) + + // Parse and validate flags + args, diags := arguments.ParseOutput(rawArgs) + if diags.HasErrors() { + c.View.Diagnostics(diags) + c.View.HelpPrompt("output") return 1 } - args = cmdFlags.Args() - if len(args) > 1 { - c.Ui.Error( - "The output command expects exactly one argument with 
the name\n" + - "of an output variable or no arguments to show all outputs.\n") - cmdFlags.Usage() + view := views.NewOutput(args.ViewType, c.View) + + // Fetch data from state + outputs, diags := c.Outputs(args.StatePath) + if diags.HasErrors() { + view.Diagnostics(diags) return 1 } - name := "" - if len(args) > 0 { - name = args[0] + // Render the view + viewDiags := view.Output(args.Name, outputs) + diags = diags.Append(viewDiags) + + view.Diagnostics(diags) + + if diags.HasErrors() { + return 1 } + return 0 +} + +func (c *OutputCommand) Outputs(statePath string) (map[string]*states.OutputValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Allow state path override if statePath != "" { c.Meta.statePath = statePath } - var diags tfdiags.Diagnostics - // Load the backend b, backendDiags := c.Backend(nil) diags = diags.Append(backendDiags) - if backendDiags.HasErrors() { - c.showDiagnostics(diags) - return 1 + if diags.HasErrors() { + return nil, diags } + // This is a read-only command + c.ignoreRemoteBackendVersionConflict(b) + env, err := c.Workspace() if err != nil { - c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err)) - return 1 + diags = diags.Append(fmt.Errorf("Error selecting workspace: %s", err)) + return nil, diags } // Get the state stateStore, err := b.StateMgr(env) if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 + diags = diags.Append(fmt.Errorf("Failed to load state: %s", err)) + return nil, diags } if err := stateStore.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } - - moduleAddr := addrs.RootModuleInstance - if module != "" { - // This option was supported prior to 0.12.0, but no longer supported - // because we only persist the root module outputs in state. 
- // (We could perhaps re-introduce this by doing an eval walk here to - // repopulate them, similar to how "terraform console" does it, but - // that requires more thought since it would imply this command - // supporting remote operations, which is a big change.) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unsupported option", - "The -module option is no longer supported since Terraform 0.12, because now only root outputs are persisted in the state.", - )) - c.showDiagnostics(diags) - return 1 + diags = diags.Append(fmt.Errorf("Failed to load state: %s", err)) + return nil, diags } state := stateStore.State() @@ -101,102 +92,7 @@ func (c *OutputCommand) Run(args []string) int { state = states.NewState() } - mod := state.Module(moduleAddr) - if mod == nil { - c.Ui.Error(fmt.Sprintf( - "The module %s could not be found. There is nothing to output.", - module)) - return 1 - } - - if !jsonOutput && (state.Empty() || len(mod.OutputValues) == 0) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Warning, - "No outputs found", - "The state file either has no outputs defined, or all the defined "+ - "outputs are empty. Please define an output in your configuration "+ - "with the `output` keyword and run `terraform refresh` for it to "+ - "become available. If you are using interpolation, please verify "+ - "the interpolated value is not empty. You can use the "+ - "`terraform console` command to assist.", - )) - c.showDiagnostics(diags) - return 0 - } - - if name == "" { - if jsonOutput { - // Due to a historical accident, the switch from state version 2 to - // 3 caused our JSON output here to be the full metadata about the - // outputs rather than just the output values themselves as we'd - // show in the single value case. We must now maintain that behavior - // for compatibility, so this is an emulation of the JSON - // serialization of outputs used in state format version 3. 
- type OutputMeta struct { - Sensitive bool `json:"sensitive"` - Type json.RawMessage `json:"type"` - Value json.RawMessage `json:"value"` - } - outputs := map[string]OutputMeta{} - - for n, os := range mod.OutputValues { - jsonVal, err := ctyjson.Marshal(os.Value, os.Value.Type()) - if err != nil { - diags = diags.Append(err) - c.showDiagnostics(diags) - return 1 - } - jsonType, err := ctyjson.MarshalType(os.Value.Type()) - if err != nil { - diags = diags.Append(err) - c.showDiagnostics(diags) - return 1 - } - outputs[n] = OutputMeta{ - Sensitive: os.Sensitive, - Type: json.RawMessage(jsonType), - Value: json.RawMessage(jsonVal), - } - } - - jsonOutputs, err := json.MarshalIndent(outputs, "", " ") - if err != nil { - diags = diags.Append(err) - c.showDiagnostics(diags) - return 1 - } - c.Ui.Output(string(jsonOutputs)) - return 0 - } else { - c.Ui.Output(outputsAsString(state, moduleAddr, false)) - return 0 - } - } - - os, ok := mod.OutputValues[name] - if !ok { - c.Ui.Error(fmt.Sprintf( - "The output variable requested could not be found in the state\n" + - "file. If you recently added this to your configuration, be\n" + - "sure to run `terraform apply`, since the state won't be updated\n" + - "with new output variables until that command is run.")) - return 1 - } - v := os.Value - - if jsonOutput { - jsonOutput, err := ctyjson.Marshal(v, v.Type()) - if err != nil { - return 1 - } - - c.Ui.Output(string(jsonOutput)) - } else { - result := repl.FormatValue(v, 0) - c.Ui.Output(result) - } - - return 0 + return state.RootModule().OutputValues, nil } func (c *OutputCommand) Help() string { @@ -216,8 +112,12 @@ Options: -no-color If specified, output won't contain any color. -json If specified, machine readable output will be - printed in JSON format + printed in JSON format. + -raw For value types that can be automatically + converted to a string, will print the raw + string directly, rather than a human-oriented + representation of the value. 
` return strings.TrimSpace(helpText) } diff --git a/command/output_test.go b/command/output_test.go index 4ca121aa1..3824c0fdc 100644 --- a/command/output_test.go +++ b/command/output_test.go @@ -6,7 +6,6 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" "github.com/zclconf/go-cty/cty" "github.com/hashicorp/terraform/addrs" @@ -24,11 +23,11 @@ func TestOutput(t *testing.T) { statePath := testStateFile(t, originalState) - ui := new(cli.MockUi) + view, done := testView(t) c := &OutputCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, + View: view, }, } @@ -36,11 +35,13 @@ func TestOutput(t *testing.T) { "-state", statePath, "foo", } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) } - actual := strings.TrimSpace(ui.OutputWriter.String()) + actual := strings.TrimSpace(output.Stdout()) if actual != `"bar"` { t.Fatalf("bad: %#v", actual) } @@ -64,22 +65,24 @@ func TestOutput_nestedListAndMap(t *testing.T) { }) statePath := testStateFile(t, originalState) - ui := new(cli.MockUi) + view, done := testView(t) c := &OutputCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, + View: view, }, } args := []string{ "-state", statePath, } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) } - actual := strings.TrimSpace(ui.OutputWriter.String()) + actual := strings.TrimSpace(output.Stdout()) expected := strings.TrimSpace(` foo = tolist([ tomap({ @@ -107,11 +110,11 @@ func TestOutput_json(t *testing.T) { statePath := testStateFile(t, originalState) - ui := new(cli.MockUi) + view, done := testView(t) c := &OutputCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, + View: view, }, } @@ 
-119,11 +122,13 @@ func TestOutput_json(t *testing.T) { "-state", statePath, "-json", } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) } - actual := strings.TrimSpace(ui.OutputWriter.String()) + actual := strings.TrimSpace(output.Stdout()) expected := "{\n \"foo\": {\n \"sensitive\": false,\n \"type\": \"string\",\n \"value\": \"bar\"\n }\n}" if actual != expected { t.Fatalf("wrong output\ngot: %#v\nwant: %#v", actual, expected) @@ -135,21 +140,25 @@ func TestOutput_emptyOutputs(t *testing.T) { statePath := testStateFile(t, originalState) p := testProvider() - ui := new(cli.MockUi) + view, done := testView(t) c := &OutputCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), - Ui: ui, + View: view, }, } args := []string{ + "-no-color", "-state", statePath, } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) } - if got, want := ui.ErrorWriter.String(), "Warning: No outputs found"; !strings.Contains(got, want) { + // Warning diagnostics should go to stdout + if got, want := output.Stdout(), "Warning: No outputs found"; !strings.Contains(got, want) { t.Fatalf("bad output: expected to contain %q, got:\n%s", want, got) } } @@ -159,11 +168,11 @@ func TestOutput_jsonEmptyOutputs(t *testing.T) { statePath := testStateFile(t, originalState) p := testProvider() - ui := new(cli.MockUi) + view, done := testView(t) c := &OutputCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), - Ui: ui, + View: view, }, } @@ -171,46 +180,19 @@ func TestOutput_jsonEmptyOutputs(t *testing.T) { "-state", statePath, "-json", } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", 
output.Stderr()) } - actual := strings.TrimSpace(ui.OutputWriter.String()) + actual := strings.TrimSpace(output.Stdout()) expected := "{}" if actual != expected { t.Fatalf("bad:\n%#v\n%#v", expected, actual) } } -func TestMissingModuleOutput(t *testing.T) { - originalState := states.BuildState(func(s *states.SyncState) { - s.SetOutputValue( - addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), - cty.StringVal("bar"), - false, - ) - }) - statePath := testStateFile(t, originalState) - - ui := new(cli.MockUi) - c := &OutputCommand{ - Meta: Meta{ - testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, - }, - } - - args := []string{ - "-state", statePath, - "-module", "not_existing_module", - "blah", - } - - if code := c.Run(args); code != 1 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) - } -} - func TestOutput_badVar(t *testing.T) { originalState := states.BuildState(func(s *states.SyncState) { s.SetOutputValue( @@ -221,11 +203,11 @@ func TestOutput_badVar(t *testing.T) { }) statePath := testStateFile(t, originalState) - ui := new(cli.MockUi) + view, done := testView(t) c := &OutputCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, + View: view, }, } @@ -233,8 +215,10 @@ func TestOutput_badVar(t *testing.T) { "-state", statePath, "bar", } - if code := c.Run(args); code != 1 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: \n%s", output.Stderr()) } } @@ -253,11 +237,11 @@ func TestOutput_blank(t *testing.T) { }) statePath := testStateFile(t, originalState) - ui := new(cli.MockUi) + view, done := testView(t) c := &OutputCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, + View: view, }, } @@ -266,23 +250,24 @@ func TestOutput_blank(t *testing.T) { "", } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + code := c.Run(args) + output := done(t) 
+ if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) } expectedOutput := "foo = \"bar\"\nname = \"john-doe\"\n" - output := ui.OutputWriter.String() - if output != expectedOutput { - t.Fatalf("wrong output\ngot: %#v\nwant: %#v", output, expectedOutput) + if got := output.Stdout(); got != expectedOutput { + t.Fatalf("wrong output\ngot: %#v\nwant: %#v", got, expectedOutput) } } func TestOutput_manyArgs(t *testing.T) { - ui := new(cli.MockUi) + view, done := testView(t) c := &OutputCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, + View: view, }, } @@ -290,23 +275,27 @@ func TestOutput_manyArgs(t *testing.T) { "bad", "bad", } - if code := c.Run(args); code != 1 { - t.Fatalf("bad: \n%s", ui.OutputWriter.String()) + code := c.Run(args) + output := done(t) + if code != 1 { + t.Fatalf("bad: \n%s", output.Stdout()) } } func TestOutput_noArgs(t *testing.T) { - ui := new(cli.MockUi) + view, done := testView(t) c := &OutputCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, + View: view, }, } args := []string{} - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.OutputWriter.String()) + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stdout()) } } @@ -314,11 +303,11 @@ func TestOutput_noState(t *testing.T) { originalState := states.NewState() statePath := testStateFile(t, originalState) - ui := new(cli.MockUi) + view, done := testView(t) c := &OutputCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, + View: view, }, } @@ -326,8 +315,10 @@ func TestOutput_noState(t *testing.T) { "-state", statePath, "foo", } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) } } @@ -336,11 +327,11 @@ func TestOutput_noVars(t *testing.T) { statePath := testStateFile(t, originalState) - 
ui := new(cli.MockUi) + view, done := testView(t) c := &OutputCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, + View: view, }, } @@ -348,8 +339,10 @@ func TestOutput_noVars(t *testing.T) { "-state", statePath, "bar", } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) } } @@ -387,22 +380,24 @@ func TestOutput_stateDefault(t *testing.T) { } defer os.Chdir(cwd) - ui := new(cli.MockUi) + view, done := testView(t) c := &OutputCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(testProvider()), - Ui: ui, + View: view, }, } args := []string{ "foo", } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + code := c.Run(args) + output := done(t) + if code != 0 { + t.Fatalf("bad: \n%s", output.Stderr()) } - actual := strings.TrimSpace(ui.OutputWriter.String()) + actual := strings.TrimSpace(output.Stdout()) if actual != `"bar"` { t.Fatalf("bad: %#v", actual) } diff --git a/command/plan.go b/command/plan.go index dc578f42e..ce5ea0800 100644 --- a/command/plan.go +++ b/command/plan.go @@ -32,6 +32,13 @@ func (c *PlanCommand) Run(args []string) int { cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + + diags := c.parseTargetFlags() + if diags.HasErrors() { + c.showDiagnostics(diags) return 1 } @@ -47,23 +54,6 @@ func (c *PlanCommand) Run(args []string) int { return 1 } - // Check if the path is a plan, which is not permitted - planFileReader, err := c.PlanFile(configPath) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - if planFileReader != nil { - c.showDiagnostics(tfdiags.Sourceless( - tfdiags.Error, - "Invalid 
configuration directory", - fmt.Sprintf("Cannot pass a saved plan file to the 'terraform plan' command. To apply a saved plan, use: terraform apply %s", configPath), - )) - return 1 - } - - var diags tfdiags.Diagnostics - var backendConfig *configs.Backend var configDiags tfdiags.Diagnostics backendConfig, configDiags = c.loadBackendConfig(configPath) @@ -191,7 +181,7 @@ func (c *PlanCommand) Run(args []string) int { func (c *PlanCommand) Help() string { helpText := ` -Usage: terraform plan [options] [DIR] +Usage: terraform plan [options] Generates a speculative execution plan, showing what actions Terraform would take to apply the current configuration. This command will not diff --git a/command/plan_test.go b/command/plan_test.go index 7a99446a5..c9c09476e 100644 --- a/command/plan_test.go +++ b/command/plan_test.go @@ -4,6 +4,7 @@ import ( "bytes" "io/ioutil" "os" + "path" "path/filepath" "strings" "sync" @@ -98,6 +99,11 @@ func TestPlan_plan(t *testing.T) { } func TestPlan_destroy(t *testing.T) { + td := tempDir(t) + testCopyDir(t, testFixturePath("plan"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + originalState := states.BuildState(func(s *states.SyncState) { s.SetResourceInstanceCurrent( addrs.Resource{ @@ -131,7 +137,6 @@ func TestPlan_destroy(t *testing.T) { "-destroy", "-out", outPath, "-state", statePath, - testFixturePath("plan"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -146,8 +151,10 @@ func TestPlan_destroy(t *testing.T) { } func TestPlan_noState(t *testing.T) { - tmp, cwd := testCwd(t) - defer testFixCwd(t, tmp, cwd) + td := tempDir(t) + testCopyDir(t, testFixturePath("plan"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() p := planFixtureProvider() ui := new(cli.MockUi) @@ -158,9 +165,7 @@ func TestPlan_noState(t *testing.T) { }, } - args := []string{ - testFixturePath("plan"), - } + args := []string{} if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, 
ui.ErrorWriter.String()) } @@ -172,17 +177,18 @@ func TestPlan_noState(t *testing.T) { // Verify that the provider was called with the existing state actual := p.PlanResourceChangeRequest.PriorState - expected := cty.NullVal(p.GetSchemaReturn.ResourceTypes["test_instance"].ImpliedType()) + expected := cty.NullVal(p.GetSchemaResponse.ResourceTypes["test_instance"].Block.ImpliedType()) if !expected.RawEquals(actual) { t.Fatalf("wrong prior state\ngot: %#v\nwant: %#v", actual, expected) } } func TestPlan_outPath(t *testing.T) { - tmp, cwd := testCwd(t) - defer testFixCwd(t, tmp, cwd) + td := tempDir(t) + testCopyDir(t, testFixturePath("plan"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() - td := testTempDir(t) outPath := filepath.Join(td, "test.plan") p := planFixtureProvider() @@ -194,13 +200,12 @@ func TestPlan_outPath(t *testing.T) { }, } - p.PlanResourceChangeResponse = providers.PlanResourceChangeResponse{ + p.PlanResourceChangeResponse = &providers.PlanResourceChangeResponse{ PlannedState: cty.NullVal(cty.EmptyObject), } args := []string{ "-out", outPath, - testFixturePath("plan"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -210,6 +215,11 @@ func TestPlan_outPath(t *testing.T) { } func TestPlan_outPathNoChange(t *testing.T) { + td := tempDir(t) + testCopyDir(t, testFixturePath("plan"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + originalState := states.BuildState(func(s *states.SyncState) { s.SetResourceInstanceCurrent( addrs.Resource{ @@ -232,7 +242,6 @@ func TestPlan_outPathNoChange(t *testing.T) { }) statePath := testStateFile(t, originalState) - td := testTempDir(t) outPath := filepath.Join(td, "test.plan") p := planFixtureProvider() @@ -247,7 +256,6 @@ func TestPlan_outPathNoChange(t *testing.T) { args := []string{ "-out", outPath, "-state", statePath, - testFixturePath("plan"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ 
-285,24 +293,26 @@ func TestPlan_outBackend(t *testing.T) { ) }) - // Setup our backend state + // Set up our backend state dataState, srv := testBackendState(t, originalState, 200) defer srv.Close() testStateFileRemote(t, dataState) outPath := "foo" p := testProvider() - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - "ami": { - Type: cty.String, - Optional: true, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "ami": { + Type: cty.String, + Optional: true, + }, }, }, }, @@ -358,8 +368,11 @@ func TestPlan_outBackend(t *testing.T) { } func TestPlan_refreshFalse(t *testing.T) { - tmp, cwd := testCwd(t) - defer testFixCwd(t, tmp, cwd) + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("plan"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() p := planFixtureProvider() ui := new(cli.MockUi) @@ -372,7 +385,6 @@ func TestPlan_refreshFalse(t *testing.T) { args := []string{ "-refresh=false", - testFixturePath("plan"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -384,6 +396,12 @@ func TestPlan_refreshFalse(t *testing.T) { } func TestPlan_state(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("plan"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + originalState := testState() statePath := testStateFile(t, originalState) @@ -398,7 +416,6 @@ func TestPlan_state(t *testing.T) { args := []string{ "-state", statePath, - testFixturePath("plan"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, 
ui.ErrorWriter.String()) @@ -420,18 +437,16 @@ func TestPlan_state(t *testing.T) { } func TestPlan_stateDefault(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("plan"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + // Generate state and move it to the default path originalState := testState() statePath := testStateFile(t, originalState) - - // Change to that directory - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("err: %s", err) - } - if err := os.Chdir(filepath.Dir(statePath)); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Chdir(cwd) + os.Rename(statePath, path.Join(td, "terraform.tfstate")) p := planFixtureProvider() ui := new(cli.MockUi) @@ -442,10 +457,7 @@ func TestPlan_stateDefault(t *testing.T) { }, } - args := []string{ - "-state", statePath, - testFixturePath("plan"), - } + args := []string{} if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } @@ -476,11 +488,13 @@ func TestPlan_validate(t *testing.T) { defer testChdir(t, td)() p := testProvider() - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, }, }, }, @@ -510,8 +524,11 @@ func TestPlan_validate(t *testing.T) { } func TestPlan_vars(t *testing.T) { - tmp, cwd := testCwd(t) - defer testFixCwd(t, tmp, cwd) + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("plan-vars"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() p := planVarsFixtureProvider() ui := new(cli.MockUi) @@ -531,7 
+548,6 @@ func TestPlan_vars(t *testing.T) { args := []string{ "-var", "foo=bar", - testFixturePath("plan-vars"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -543,8 +559,11 @@ func TestPlan_vars(t *testing.T) { } func TestPlan_varsUnset(t *testing.T) { - tmp, cwd := testCwd(t) - defer testFixCwd(t, tmp, cwd) + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("plan-vars"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() // The plan command will prompt for interactive input of var.foo. // We'll answer "bar" to that prompt, which should then allow this @@ -565,9 +584,7 @@ func TestPlan_varsUnset(t *testing.T) { }, } - args := []string{ - testFixturePath("plan-vars"), - } + args := []string{} if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } @@ -577,8 +594,11 @@ func TestPlan_varsUnset(t *testing.T) { // processing of user input: // https://github.com/hashicorp/terraform/issues/26035 func TestPlan_providerArgumentUnset(t *testing.T) { - tmp, cwd := testCwd(t) - defer testFixCwd(t, tmp, cwd) + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("plan"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() // Disable test mode so input would be asked test = false @@ -589,25 +609,29 @@ func TestPlan_providerArgumentUnset(t *testing.T) { p := planFixtureProvider() // override the planFixtureProvider schema to include a required provider argument - p.GetSchemaReturn = &terraform.ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "region": {Type: cty.String, Required: true}, + p.GetSchemaResponse = &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Required: true}, + }, }, }, - 
ResourceTypes: map[string]*configschema.Block{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true, Computed: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "network_interface": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "device_index": {Type: cty.String, Optional: true}, - "description": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true, Computed: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "device_index": {Type: cty.String, Optional: true}, + "description": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -623,17 +647,18 @@ func TestPlan_providerArgumentUnset(t *testing.T) { }, } - args := []string{ - testFixturePath("plan"), - } + args := []string{} if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } } func TestPlan_varFile(t *testing.T) { - tmp, cwd := testCwd(t) - defer testFixCwd(t, tmp, cwd) + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("plan-vars"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() varFilePath := testTempFile(t) if err := ioutil.WriteFile(varFilePath, []byte(planVarFile), 0644); err != nil { @@ -658,7 +683,6 @@ func TestPlan_varFile(t *testing.T) { args := []string{ "-var-file", varFilePath, - testFixturePath("plan-vars"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -670,21 +694,17 @@ func 
TestPlan_varFile(t *testing.T) { } func TestPlan_varFileDefault(t *testing.T) { - varFileDir := testTempDir(t) - varFilePath := filepath.Join(varFileDir, "terraform.tfvars") + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("plan-vars"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + varFilePath := filepath.Join(td, "terraform.tfvars") if err := ioutil.WriteFile(varFilePath, []byte(planVarFile), 0644); err != nil { t.Fatalf("err: %s", err) } - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("err: %s", err) - } - if err := os.Chdir(varFileDir); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Chdir(cwd) - p := planVarsFixtureProvider() ui := new(cli.MockUi) c := &PlanCommand{ @@ -701,9 +721,7 @@ func TestPlan_varFileDefault(t *testing.T) { return } - args := []string{ - testFixturePath("plan-vars"), - } + args := []string{} if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } @@ -714,8 +732,11 @@ func TestPlan_varFileDefault(t *testing.T) { } func TestPlan_varFileWithDecls(t *testing.T) { - tmp, cwd := testCwd(t) - defer testFixCwd(t, tmp, cwd) + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("plan-vars"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() varFilePath := testTempFile(t) if err := ioutil.WriteFile(varFilePath, []byte(planVarFileWithDecl), 0644); err != nil { @@ -733,7 +754,6 @@ func TestPlan_varFileWithDecls(t *testing.T) { args := []string{ "-var-file", varFilePath, - testFixturePath("plan-vars"), } if code := c.Run(args); code == 0 { t.Fatalf("succeeded; want failure\n\n%s", ui.OutputWriter.String()) @@ -788,6 +808,12 @@ func TestPlan_detailedExitcode_emptyDiff(t *testing.T) { } func TestPlan_shutdown(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("apply-shutdown"), td) + defer 
os.RemoveAll(td) + defer testChdir(t, td)() + cancelled := make(chan struct{}) shutdownCh := make(chan struct{}) @@ -827,30 +853,21 @@ func TestPlan_shutdown(t *testing.T) { return } - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "ami": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + }, }, }, }, } - code := c.Run([]string{ - // Unfortunately it seems like this test can inadvertently pick up - // leftover state from other tests without this. Ideally we should - // find which test is leaving a terraform.tfstate behind and stop it - // doing that, but this will stop this test flapping for now. - "-state=nonexistent.tfstate", - testFixturePath("apply-shutdown"), - }) - if code != 0 { - // FIXME: In retrospect cancellation ought to be an unsuccessful exit - // case, but we need to do that cautiously in case it impacts automation - // wrappers. See the note about this in the terraform.stopHook - // implementation for more. 
- t.Errorf("wrong exit code %d; want 0\noutput:\n%s", code, ui.OutputWriter.String()) + code := c.Run([]string{}) + if code != 1 { + t.Errorf("wrong exit code %d; want 1\noutput:\n%s", code, ui.OutputWriter.String()) } select { @@ -884,24 +901,110 @@ func TestPlan_init_required(t *testing.T) { } } +// Config with multiple resources, targeting plan of a subset +func TestPlan_targeted(t *testing.T) { + td := tempDir(t) + testCopyDir(t, testFixturePath("apply-targeted"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + p := testProvider() + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } + + ui := new(cli.MockUi) + c := &PlanCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + }, + } + + args := []string{ + "-target", "test_instance.foo", + "-target", "test_instance.baz", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + if got, want := ui.OutputWriter.String(), "3 to add, 0 to change, 0 to destroy"; !strings.Contains(got, want) { + t.Fatalf("bad change summary, want %q, got:\n%s", want, got) + } +} + +// Diagnostics for invalid -target flags +func TestPlan_targetFlagsDiags(t *testing.T) { + testCases := map[string]string{ + "test_instance.": "Dot must be followed by attribute name.", + "test_instance": "Resource specification must include a resource type and name.", + } + + for target, wantDiag := range testCases { + t.Run(target, func(t *testing.T) { + td := testTempDir(t) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + 
c := &PlanCommand{ + Meta: Meta{ + Ui: ui, + }, + } + + args := []string{ + "-target", target, + } + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + got := ui.ErrorWriter.String() + if !strings.Contains(got, target) { + t.Fatalf("bad error output, want %q, got:\n%s", target, got) + } + if !strings.Contains(got, wantDiag) { + t.Fatalf("bad error output, want %q, got:\n%s", wantDiag, got) + } + }) + } +} + // planFixtureSchema returns a schema suitable for processing the // configuration in testdata/plan . This schema should be // assigned to a mock provider named "test". -func planFixtureSchema() *terraform.ProviderSchema { - return &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ +func planFixtureSchema() *providers.GetSchemaResponse { + return &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "network_interface": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "device_index": {Type: cty.String, Optional: true}, - "description": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "device_index": {Type: cty.String, Optional: true}, + "description": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -913,11 +1016,11 @@ func planFixtureSchema() *terraform.ProviderSchema { // planFixtureProvider 
returns a mock provider that is configured for basic // operation with the configuration in testdata/plan. This mock has -// GetSchemaReturn and PlanResourceChangeFn populated, with the plan +// GetSchemaResponse and PlanResourceChangeFn populated, with the plan // step just passing through the new object proposed by Terraform Core. func planFixtureProvider() *terraform.MockProvider { p := testProvider() - p.GetSchemaReturn = planFixtureSchema() + p.GetSchemaResponse = planFixtureSchema() p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { return providers.PlanResourceChangeResponse{ PlannedState: req.ProposedNewState, @@ -929,13 +1032,15 @@ func planFixtureProvider() *terraform.MockProvider { // planVarsFixtureSchema returns a schema suitable for processing the // configuration in testdata/plan-vars . This schema should be // assigned to a mock provider named "test". -func planVarsFixtureSchema() *terraform.ProviderSchema { - return &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ +func planVarsFixtureSchema() *providers.GetSchemaResponse { + return &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "value": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "value": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -944,11 +1049,11 @@ func planVarsFixtureSchema() *terraform.ProviderSchema { // planVarsFixtureProvider returns a mock provider that is configured for basic // operation with the configuration in testdata/plan-vars. 
This mock has -// GetSchemaReturn and PlanResourceChangeFn populated, with the plan +// GetSchemaResponse and PlanResourceChangeFn populated, with the plan // step just passing through the new object proposed by Terraform Core. func planVarsFixtureProvider() *terraform.MockProvider { p := testProvider() - p.GetSchemaReturn = planVarsFixtureSchema() + p.GetSchemaResponse = planVarsFixtureSchema() p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { return providers.PlanResourceChangeResponse{ PlannedState: req.ProposedNewState, @@ -967,17 +1072,3 @@ foo = "bar" variable "nope" { } ` - -const testPlanNoStateStr = ` - -` - -const testPlanStateStr = ` -ID = bar -Tainted = false -` - -const testPlanStateDefaultStr = ` -ID = bar -Tainted = false -` diff --git a/command/plugins.go b/command/plugins.go index d93d4f752..f57de348a 100644 --- a/command/plugins.go +++ b/command/plugins.go @@ -9,16 +9,17 @@ import ( "os/exec" "path/filepath" "runtime" - "strings" plugin "github.com/hashicorp/go-plugin" "github.com/kardianos/osext" + fileprovisioner "github.com/hashicorp/terraform/builtin/provisioners/file" + localexec "github.com/hashicorp/terraform/builtin/provisioners/local-exec" + remoteexec "github.com/hashicorp/terraform/builtin/provisioners/remote-exec" "github.com/hashicorp/terraform/internal/logging" tfplugin "github.com/hashicorp/terraform/plugin" "github.com/hashicorp/terraform/plugin/discovery" "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/terraform" ) // NOTE WELL: The logic in this file is primarily about plugin types OTHER THAN @@ -120,7 +121,7 @@ func (m *Meta) pluginDirs(includeAutoInstalled bool) []string { return dirs } -func (m *Meta) provisionerFactories() map[string]terraform.ProvisionerFactory { +func (m *Meta) provisionerFactories() map[string]provisioners.Factory { dirs := m.pluginDirs(true) plugins := discovery.FindPlugins("provisioner", dirs) plugins, _ = 
plugins.ValidateVersions() @@ -131,12 +132,12 @@ func (m *Meta) provisionerFactories() map[string]terraform.ProvisionerFactory { // name here, even though the discovery interface forces us to pretend // that might not be true. - factories := make(map[string]terraform.ProvisionerFactory) + factories := make(map[string]provisioners.Factory) // Wire up the internal provisioners first. These might be overridden // by discovered provisioners below. - for name := range InternalProvisioners { - factories[name] = internalProvisionerFactory(discovery.PluginMeta{Name: name}) + for name, factory := range internalProvisionerFactories() { + factories[name] = factory } byName := plugins.ByName() @@ -152,30 +153,7 @@ func (m *Meta) provisionerFactories() map[string]terraform.ProvisionerFactory { return factories } -func internalPluginClient(kind, name string) (*plugin.Client, error) { - cmdLine, err := BuildPluginCommandString(kind, name) - if err != nil { - return nil, err - } - - // See the docstring for BuildPluginCommandString for why we need to do - // this split here. 
- cmdArgv := strings.Split(cmdLine, TFSPACE) - - cfg := &plugin.ClientConfig{ - Cmd: exec.Command(cmdArgv[0], cmdArgv[1:]...), - HandshakeConfig: tfplugin.Handshake, - Managed: true, - VersionedPlugins: tfplugin.VersionedPlugins, - AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, - AutoMTLS: enableProviderAutoMTLS, - Logger: logging.NewLogger(kind), - } - - return plugin.NewClient(cfg), nil -} - -func provisionerFactory(meta discovery.PluginMeta) terraform.ProvisionerFactory { +func provisionerFactory(meta discovery.PluginMeta) provisioners.Factory { return func() (provisioners.Interface, error) { cfg := &plugin.ClientConfig{ Cmd: exec.Command(meta.Path), @@ -191,13 +169,11 @@ func provisionerFactory(meta discovery.PluginMeta) terraform.ProvisionerFactory } } -func internalProvisionerFactory(meta discovery.PluginMeta) terraform.ProvisionerFactory { - return func() (provisioners.Interface, error) { - client, err := internalPluginClient("provisioner", meta.Name) - if err != nil { - return nil, fmt.Errorf("[WARN] failed to build command line for internal plugin %q: %s", meta.Name, err) - } - return newProvisionerClient(client) +func internalProvisionerFactories() map[string]provisioners.Factory { + return map[string]provisioners.Factory{ + "file": provisioners.FactoryFixed(fileprovisioner.New()), + "local-exec": provisioners.FactoryFixed(localexec.New()), + "remote-exec": provisioners.FactoryFixed(remoteexec.New()), } } diff --git a/command/providers.go b/command/providers.go index d2042d122..da97ae158 100644 --- a/command/providers.go +++ b/command/providers.go @@ -82,6 +82,9 @@ func (c *ProvidersCommand) Run(args []string) int { return 1 } + // This is a read-only command + c.ignoreRemoteBackendVersionConflict(b) + // Get the state env, err := c.Workspace() if err != nil { diff --git a/command/providers_schema.go b/command/providers_schema.go index 00634cf2f..3584be9f4 100644 --- a/command/providers_schema.go +++ b/command/providers_schema.go @@ -67,6 +67,9 
@@ func (c *ProvidersSchemaCommand) Run(args []string) int { return 1 } + // This is a read-only command + c.ignoreRemoteBackendVersionConflict(b) + // we expect that the config dir is the cwd cwd, err := os.Getwd() if err != nil { diff --git a/command/refresh.go b/command/refresh.go index 36c40633f..91dd28ce1 100644 --- a/command/refresh.go +++ b/command/refresh.go @@ -4,8 +4,9 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/command/views" "github.com/hashicorp/terraform/tfdiags" ) @@ -26,6 +27,13 @@ func (c *RefreshCommand) Run(args []string) int { cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) + return 1 + } + + diags := c.parseTargetFlags() + if diags.HasErrors() { + c.showDiagnostics(diags) return 1 } @@ -35,8 +43,6 @@ func (c *RefreshCommand) Run(args []string) int { return 1 } - var diags tfdiags.Diagnostics - // Check for user-supplied plugin path if c.pluginPath, err = c.loadPluginPath(); err != nil { c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err)) @@ -96,8 +102,13 @@ func (c *RefreshCommand) Run(args []string) int { return op.Result.ExitStatus() } - if outputs := outputsAsString(op.State, addrs.RootModuleInstance, true); outputs != "" { - c.Ui.Output(c.Colorize().Color(outputs)) + if op.State != nil { + outputValues := op.State.RootModule().OutputValues + if len(outputValues) > 0 { + c.Ui.Output(c.Colorize().Color("[reset][bold][green]\nOutputs:\n\n")) + view := views.NewOutput(arguments.ViewHuman, c.View) + view.Output("", outputValues) + } } return op.Result.ExitStatus() @@ -105,7 +116,7 @@ func (c *RefreshCommand) Run(args []string) int { func (c *RefreshCommand) Help() string { 
helpText := ` -Usage: terraform refresh [options] [dir] +Usage: terraform refresh [options] Update the state file of your infrastructure with metadata that matches the physical resources they are tracking. diff --git a/command/refresh_test.go b/command/refresh_test.go index af3bbba85..2175b0323 100644 --- a/command/refresh_test.go +++ b/command/refresh_test.go @@ -2,7 +2,6 @@ package command import ( "bytes" - "encoding/json" "fmt" "io/ioutil" "os" @@ -23,27 +22,34 @@ import ( "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/states/statefile" "github.com/hashicorp/terraform/states/statemgr" - "github.com/hashicorp/terraform/terraform" ) var equateEmpty = cmpopts.EquateEmpty() func TestRefresh(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("refresh"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + state := testState() statePath := testStateFile(t, state) p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &RefreshCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } - p.GetSchemaReturn = refreshFixtureSchema() + p.GetSchemaResponse = refreshFixtureSchema() p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{ + p.ReadResourceResponse = &providers.ReadResourceResponse{ NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yes"), }), @@ -51,7 +57,6 @@ func TestRefresh(t *testing.T) { args := []string{ "-state", statePath, - testFixturePath("refresh"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -88,23 +93,23 @@ func TestRefresh_empty(t *testing.T) { p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &RefreshCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{ + 
p.ReadResourceResponse = &providers.ReadResourceResponse{ NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yes"), }), } - args := []string{ - td, - } + args := []string{} if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } @@ -115,6 +120,12 @@ func TestRefresh_empty(t *testing.T) { } func TestRefresh_lockedState(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("refresh"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + state := testState() statePath := testStateFile(t, state) @@ -126,16 +137,18 @@ func TestRefresh_lockedState(t *testing.T) { p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &RefreshCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } - p.GetSchemaReturn = refreshFixtureSchema() + p.GetSchemaResponse = refreshFixtureSchema() p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{ + p.ReadResourceResponse = &providers.ReadResourceResponse{ NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yes"), }), @@ -143,7 +156,6 @@ func TestRefresh_lockedState(t *testing.T) { args := []string{ "-state", statePath, - testFixturePath("refresh"), } if code := c.Run(args); code == 0 { @@ -171,16 +183,18 @@ func TestRefresh_cwd(t *testing.T) { p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &RefreshCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } - p.GetSchemaReturn = refreshFixtureSchema() + p.GetSchemaResponse = refreshFixtureSchema() p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{ + p.ReadResourceResponse = &providers.ReadResourceResponse{ NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yes"), }), @@ -216,6 +230,12 @@ func TestRefresh_cwd(t *testing.T) { } func TestRefresh_defaultState(t 
*testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("refresh"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + originalState := testState() // Write the state file in a temporary directory with the @@ -243,16 +263,18 @@ func TestRefresh_defaultState(t *testing.T) { p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &RefreshCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } - p.GetSchemaReturn = refreshFixtureSchema() + p.GetSchemaResponse = refreshFixtureSchema() p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{ + p.ReadResourceResponse = &providers.ReadResourceResponse{ NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yes"), }), @@ -260,7 +282,6 @@ func TestRefresh_defaultState(t *testing.T) { args := []string{ "-state", statePath, - testFixturePath("refresh"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -292,6 +313,12 @@ func TestRefresh_defaultState(t *testing.T) { } func TestRefresh_outPath(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("refresh"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + state := testState() statePath := testStateFile(t, state) @@ -306,16 +333,18 @@ func TestRefresh_outPath(t *testing.T) { p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &RefreshCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } - p.GetSchemaReturn = refreshFixtureSchema() + p.GetSchemaResponse = refreshFixtureSchema() p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{ + p.ReadResourceResponse = &providers.ReadResourceResponse{ NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yes"), }), @@ -324,7 +353,6 @@ func 
TestRefresh_outPath(t *testing.T) { args := []string{ "-state", statePath, "-state-out", outPath, - testFixturePath("refresh"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -355,23 +383,30 @@ func TestRefresh_outPath(t *testing.T) { } func TestRefresh_var(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("refresh-var"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + state := testState() statePath := testStateFile(t, state) p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &RefreshCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } - p.GetSchemaReturn = refreshVarFixtureSchema() + p.GetSchemaResponse = refreshVarFixtureSchema() args := []string{ "-var", "foo=bar", "-state", statePath, - testFixturePath("refresh-var"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -386,18 +421,26 @@ func TestRefresh_var(t *testing.T) { } func TestRefresh_varFile(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("refresh-var"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + state := testState() statePath := testStateFile(t, state) p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &RefreshCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } - p.GetSchemaReturn = refreshVarFixtureSchema() + p.GetSchemaResponse = refreshVarFixtureSchema() varFilePath := testTempFile(t) if err := ioutil.WriteFile(varFilePath, []byte(refreshVarFile), 0644); err != nil { @@ -407,7 +450,6 @@ func TestRefresh_varFile(t *testing.T) { args := []string{ "-var-file", varFilePath, "-state", statePath, - testFixturePath("refresh-var"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, 
ui.ErrorWriter.String()) @@ -422,37 +464,34 @@ func TestRefresh_varFile(t *testing.T) { } func TestRefresh_varFileDefault(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("refresh-var"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + state := testState() statePath := testStateFile(t, state) p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &RefreshCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } - p.GetSchemaReturn = refreshVarFixtureSchema() + p.GetSchemaResponse = refreshVarFixtureSchema() - varFileDir := testTempDir(t) - varFilePath := filepath.Join(varFileDir, "terraform.tfvars") + varFilePath := filepath.Join(td, "terraform.tfvars") if err := ioutil.WriteFile(varFilePath, []byte(refreshVarFile), 0644); err != nil { t.Fatalf("err: %s", err) } - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("err: %s", err) - } - if err := os.Chdir(varFileDir); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Chdir(cwd) - args := []string{ "-state", statePath, - testFixturePath("refresh-var"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -467,6 +506,12 @@ func TestRefresh_varFileDefault(t *testing.T) { } func TestRefresh_varsUnset(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("refresh-unset-var"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + // Disable test mode so input would be asked test = false defer func() { test = true }() @@ -478,18 +523,22 @@ func TestRefresh_varsUnset(t *testing.T) { p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &RefreshCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + 
p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -497,7 +546,6 @@ func TestRefresh_varsUnset(t *testing.T) { args := []string{ "-state", statePath, - testFixturePath("refresh-unset-var"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -505,6 +553,12 @@ func TestRefresh_varsUnset(t *testing.T) { } func TestRefresh_backup(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("refresh"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + state := testState() statePath := testStateFile(t, state) @@ -534,16 +588,18 @@ func TestRefresh_backup(t *testing.T) { p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &RefreshCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } - p.GetSchemaReturn = refreshFixtureSchema() + p.GetSchemaResponse = refreshFixtureSchema() p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{ + p.ReadResourceResponse = &providers.ReadResourceResponse{ NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("changed"), }), @@ -553,7 +609,6 @@ func TestRefresh_backup(t *testing.T) { "-state", statePath, "-state-out", outPath, "-backup", backupPath, - testFixturePath("refresh"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -584,6 +639,12 @@ func TestRefresh_backup(t *testing.T) { } func TestRefresh_disableBackup(t *testing.T) { + // Create a temporary working 
directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("refresh"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + state := testState() statePath := testStateFile(t, state) @@ -598,16 +659,18 @@ func TestRefresh_disableBackup(t *testing.T) { p := testProvider() ui := new(cli.MockUi) + view, _ := testView(t) c := &RefreshCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } - p.GetSchemaReturn = refreshFixtureSchema() + p.GetSchemaResponse = refreshFixtureSchema() p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{ + p.ReadResourceResponse = &providers.ReadResourceResponse{ NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yes"), }), @@ -617,7 +680,6 @@ func TestRefresh_disableBackup(t *testing.T) { "-state", statePath, "-state-out", outPath, "-backup", "-", - testFixturePath("refresh"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -653,23 +715,33 @@ func TestRefresh_disableBackup(t *testing.T) { } func TestRefresh_displaysOutputs(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("refresh-output"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + state := testState() statePath := testStateFile(t, state) p := testProvider() ui := new(cli.MockUi) + view, done := testView(t) c := &RefreshCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), Ui: ui, + View: view, }, } - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -677,7 +749,6 @@ func TestRefresh_displaysOutputs(t *testing.T) { args := []string{ "-state", statePath, - testFixturePath("refresh-output"), } if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) @@ -685,40 +756,118 @@ func TestRefresh_displaysOutputs(t *testing.T) { // Test that outputs were displayed outputValue := "foo.example.com" - actual := ui.OutputWriter.String() + actual := done(t).Stdout() if !strings.Contains(actual, outputValue) { t.Fatalf("Expected:\n%s\n\nTo include: %q", actual, outputValue) } } -// newInstanceState creates a new states.ResourceInstanceObjectSrc with the -// given value for its single id attribute. It is named newInstanceState for -// historical reasons, because it was originally written for the poorly-named -// terraform.InstanceState type. -func newInstanceState(id string) *states.ResourceInstanceObjectSrc { - attrs := map[string]interface{}{ - "id": id, +// Config with multiple resources, targeting refresh of a subset +func TestRefresh_targeted(t *testing.T) { + td := tempDir(t) + testCopyDir(t, testFixturePath("refresh-targeted"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + state := testState() + statePath := testStateFile(t, state) + + p := testProvider() + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ + "test_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }, } - attrsJSON, err := json.Marshal(attrs) - if err != nil { - panic(fmt.Sprintf("failed to marshal attributes: %s", err)) // should never happen + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + 
PlannedState: req.ProposedNewState, + } } - return &states.ResourceInstanceObjectSrc{ - AttrsJSON: attrsJSON, - Status: states.ObjectReady, + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + View: view, + }, + } + + args := []string{ + "-target", "test_instance.foo", + "-state", statePath, + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + got := ui.OutputWriter.String() + if want := "test_instance.foo: Refreshing"; !strings.Contains(got, want) { + t.Fatalf("expected output to contain %q, got:\n%s", want, got) + } + if doNotWant := "test_instance.bar: Refreshing"; strings.Contains(got, doNotWant) { + t.Fatalf("expected output not to contain %q, got:\n%s", doNotWant, got) + } +} + +// Diagnostics for invalid -target flags +func TestRefresh_targetFlagsDiags(t *testing.T) { + testCases := map[string]string{ + "test_instance.": "Dot must be followed by attribute name.", + "test_instance": "Resource specification must include a resource type and name.", + } + + for target, wantDiag := range testCases { + t.Run(target, func(t *testing.T) { + td := testTempDir(t) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + ui := new(cli.MockUi) + view, _ := testView(t) + c := &RefreshCommand{ + Meta: Meta{ + Ui: ui, + View: view, + }, + } + + args := []string{ + "-target", target, + } + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + got := ui.ErrorWriter.String() + if !strings.Contains(got, target) { + t.Fatalf("bad error output, want %q, got:\n%s", target, got) + } + if !strings.Contains(got, wantDiag) { + t.Fatalf("bad error output, want %q, got:\n%s", wantDiag, got) + } + }) } } -// refreshFixtureSchema returns a schema suitable for processing the // configuration in testdata/refresh . This schema should be // assigned to a mock provider named "test". 
-func refreshFixtureSchema() *terraform.ProviderSchema { - return &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ +func refreshFixtureSchema() *providers.GetSchemaResponse { + return &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -728,17 +877,21 @@ func refreshFixtureSchema() *terraform.ProviderSchema { // refreshVarFixtureSchema returns a schema suitable for processing the // configuration in testdata/refresh-var . This schema should be // assigned to a mock provider named "test". -func refreshVarFixtureSchema() *terraform.ProviderSchema { - return &terraform.ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, +func refreshVarFixtureSchema() *providers.GetSchemaResponse { + return &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, }, }, - ResourceTypes: map[string]*configschema.Block{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + }, }, }, }, diff --git a/command/show.go b/command/show.go index 56d0e34b9..db5efee10 100644 --- a/command/show.go +++ b/command/show.go @@ -68,6 +68,9 @@ func (c *ShowCommand) Run(args []string) int { return 1 } + // 
This is a read-only command + c.ignoreRemoteBackendVersionConflict(b) + // the show command expects the config dir to always be the cwd cwd, err := os.Getwd() if err != nil { @@ -163,7 +166,7 @@ func (c *ShowCommand) Run(args []string) int { // package rather than in the backends themselves, but for now we're // accepting this oddity because "terraform show" is a less commonly // used way to render a plan than "terraform plan" is. - localBackend.RenderPlan(plan, stateFile.State, schemas, c.Ui, c.Colorize()) + localBackend.RenderPlan(plan, stateFile.State, schemas, c.Ui, c.Colorize(), c.OutputColumns()) return 0 } diff --git a/command/show_test.go b/command/show_test.go index 93f300d47..aec6e22ec 100644 --- a/command/show_test.go +++ b/command/show_test.go @@ -408,18 +408,22 @@ func TestShow_json_output_state(t *testing.T) { // showFixtureSchema returns a schema suitable for processing the configuration // in testdata/show. This schema should be assigned to a mock provider // named "test". 
-func showFixtureSchema() *terraform.ProviderSchema { - return &terraform.ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "region": {Type: cty.String, Optional: true}, +func showFixtureSchema() *providers.GetSchemaResponse { + return &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": {Type: cty.String, Optional: true}, + }, }, }, - ResourceTypes: map[string]*configschema.Block{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "ami": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "ami": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -428,12 +432,12 @@ func showFixtureSchema() *terraform.ProviderSchema { // showFixtureProvider returns a mock provider that is configured for basic // operation with the configuration in testdata/show. This mock has -// GetSchemaReturn, PlanResourceChangeFn, and ApplyResourceChangeFn populated, +// GetSchemaResponse, PlanResourceChangeFn, and ApplyResourceChangeFn populated, // with the plan/apply steps just passing through the data determined by // Terraform Core. 
func showFixtureProvider() *terraform.MockProvider { p := testProvider() - p.GetSchemaReturn = showFixtureSchema() + p.GetSchemaResponse = showFixtureSchema() p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { idVal := req.ProposedNewState.GetAttr("id") amiVal := req.ProposedNewState.GetAttr("ami") diff --git a/command/state_list.go b/command/state_list.go index 8c8a23906..5362ebf37 100644 --- a/command/state_list.go +++ b/command/state_list.go @@ -40,6 +40,9 @@ func (c *StateListCommand) Run(args []string) int { return 1 } + // This is a read-only command + c.ignoreRemoteBackendVersionConflict(b) + // Get the state env, err := c.Workspace() if err != nil { @@ -58,7 +61,7 @@ func (c *StateListCommand) Run(args []string) int { state := stateMgr.State() if state == nil { - c.Ui.Error(fmt.Sprintf(errStateNotFound)) + c.Ui.Error(errStateNotFound) return 1 } diff --git a/command/state_list_test.go b/command/state_list_test.go index 1a275db27..e66a7968c 100644 --- a/command/state_list_test.go +++ b/command/state_list_test.go @@ -202,6 +202,75 @@ func TestStateList_noState(t *testing.T) { } } +func TestStateList_modules(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + testCopyDir(t, testFixturePath("state-list-nested-modules"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + p := testProvider() + ui := cli.NewMockUi() + c := &StateListCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + }, + } + + t.Run("list resources in module and submodules", func(t *testing.T) { + args := []string{"module.nest"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d", code) + } + + // resources in the module and any submodules should be included in the outputs + expected := "module.nest.test_instance.nest\nmodule.nest.module.subnest.test_instance.subnest\n" + actual := ui.OutputWriter.String() + if actual != expected { + 
t.Fatalf("Expected:\n%q\n\nTo equal: %q", actual, expected) + } + }) + + t.Run("submodule has resources only", func(t *testing.T) { + // now get the state for a module that has no resources, only another nested module + ui.OutputWriter.Reset() + args := []string{"module.nonexist"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d", code) + } + expected := "module.nonexist.module.child.test_instance.child\n" + actual := ui.OutputWriter.String() + if actual != expected { + t.Fatalf("Expected:\n%q\n\nTo equal: %q", actual, expected) + } + }) + + t.Run("expanded module", func(t *testing.T) { + // finally get the state for a module with an index + ui.OutputWriter.Reset() + args := []string{"module.count"} + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d", code) + } + expected := "module.count[0].test_instance.count\nmodule.count[1].test_instance.count\n" + actual := ui.OutputWriter.String() + if actual != expected { + t.Fatalf("Expected:\n%q\n\nTo equal: %q", actual, expected) + } + }) + + t.Run("completely nonexistent module", func(t *testing.T) { + // finally get the state for a module with an index + ui.OutputWriter.Reset() + args := []string{"module.notevenalittlebit"} + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d", code) + } + }) + +} + const testStateListOutput = ` test_instance.foo ` diff --git a/command/state_meta.go b/command/state_meta.go index bc70649ac..50ff5d380 100644 --- a/command/state_meta.go +++ b/command/state_meta.go @@ -41,6 +41,14 @@ func (c *StateMeta) State() (statemgr.Full, error) { if err != nil { return nil, err } + + // Check remote Terraform version is compatible + remoteVersionDiags := c.remoteBackendVersionCheck(b, workspace) + c.showDiagnostics(remoteVersionDiags) + if remoteVersionDiags.HasErrors() { + return nil, fmt.Errorf("Error checking remote Terraform version") + } + // Get the state s, err := b.StateMgr(workspace) if err != nil { @@ -95,24 +103,32 @@ func (c *StateMeta) lookupResourceInstanceAddr(state 
*states.State, allowMissing case addrs.ModuleInstance: // Matches all instances within the indicated module and all of its // descendent modules. + + // found is used to identify cases where the selected module has no + // resources, but one or more of its submodules does. + found := false ms := state.Module(addr) - if ms == nil { - if !allowMissing { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Unknown module", - fmt.Sprintf(`The current state contains no module at %s. If you've just added this module to the configuration, you must run "terraform apply" first to create the module's entry in the state.`, addr), - )) - } - break + if ms != nil { + found = true + ret = append(ret, c.collectModuleResourceInstances(ms)...) } - ret = append(ret, c.collectModuleResourceInstances(ms)...) for _, cms := range state.Modules { - candidateAddr := ms.Addr - if len(candidateAddr) > len(addr) && candidateAddr[:len(addr)].Equal(addr) { - ret = append(ret, c.collectModuleResourceInstances(cms)...) + if !addr.Equal(cms.Addr) { + if addr.IsAncestor(cms.Addr) || addr.TargetContains(cms.Addr) { + found = true + ret = append(ret, c.collectModuleResourceInstances(cms)...) + } } } + + if found == false && !allowMissing { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unknown module", + fmt.Sprintf(`The current state contains no module at %s. If you've just added this module to the configuration, you must run "terraform apply" first to create the module's entry in the state.`, addr), + )) + } + case addrs.AbsResource: // Matches all instances of the specific selected resource. 
rs := state.Resource(addr) diff --git a/command/state_mv.go b/command/state_mv.go index dcaaf23a6..451034199 100644 --- a/command/state_mv.go +++ b/command/state_mv.go @@ -23,7 +23,7 @@ func (c *StateMvCommand) Run(args []string) int { var backupPathOut, statePathOut string var dryRun bool - cmdFlags := c.Meta.defaultFlagSet("state mv") + cmdFlags := c.Meta.ignoreRemoteVersionFlagSet("state mv") cmdFlags.BoolVar(&dryRun, "dry-run", false, "dry run") cmdFlags.StringVar(&c.backupPath, "backup", "-", "backup") cmdFlags.StringVar(&backupPathOut, "backup-out", "-", "backup") @@ -64,7 +64,7 @@ func (c *StateMvCommand) Run(args []string) int { stateFrom := stateFromMgr.State() if stateFrom == nil { - c.Ui.Error(fmt.Sprintf(errStateNotFound)) + c.Ui.Error(errStateNotFound) return 1 } @@ -139,7 +139,7 @@ func (c *StateMvCommand) Run(args []string) int { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidTarget, - fmt.Sprintf("Cannot move %s to %s: the target must also be a module.", addrFrom, addrTo), + fmt.Sprintf("Cannot move %s to %s: the target must also be a module.", addrFrom, destAddr), )) c.showDiagnostics(diags) return 1 @@ -184,7 +184,7 @@ func (c *StateMvCommand) Run(args []string) int { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidTarget, - fmt.Sprintf("Cannot move %s to %s: the target must also be a whole resource.", addrFrom, addrTo), + fmt.Sprintf("Cannot move %s to %s: the source is a whole resource (not a resource instance) so the target must also be a whole resource.", addrFrom, destAddr), )) c.showDiagnostics(diags) return 1 @@ -231,7 +231,7 @@ func (c *StateMvCommand) Run(args []string) int { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidTarget, - fmt.Sprintf("Cannot move %s to %s: the target must also be a resource instance.", addrFrom, addrTo), + fmt.Sprintf("Cannot move %s to %s: the target must also be a resource instance.", addrFrom, destAddr), )) c.showDiagnostics(diags) return 1 @@ -465,31 
+465,35 @@ Usage: terraform state mv [options] SOURCE DESTINATION Options: - -dry-run If set, prints out what would've been moved but doesn't - actually move anything. + -dry-run If set, prints out what would've been moved but doesn't + actually move anything. - -backup=PATH Path where Terraform should write the backup for the original - state. This can't be disabled. If not set, Terraform - will write it to the same path as the statefile with - a ".backup" extension. + -backup=PATH Path where Terraform should write the backup for the + original state. This can't be disabled. If not set, + Terraform will write it to the same path as the + statefile with a ".backup" extension. - -backup-out=PATH Path where Terraform should write the backup for the destination - state. This can't be disabled. If not set, Terraform - will write it to the same path as the destination state - file with a backup extension. This only needs - to be specified if -state-out is set to a different path - than -state. + -backup-out=PATH Path where Terraform should write the backup for the + destination state. This can't be disabled. If not + set, Terraform will write it to the same path as the + destination state file with a backup extension. This + only needs to be specified if -state-out is set to a + different path than -state. - -lock=true Lock the state files when locking is supported. + -lock=true Lock the state files when locking is supported. - -lock-timeout=0s Duration to retry a state lock. + -lock-timeout=0s Duration to retry a state lock. - -state=PATH Path to the source state file. Defaults to the configured - backend, or "terraform.tfstate" + -state=PATH Path to the source state file. Defaults to the + configured backend, or "terraform.tfstate" - -state-out=PATH Path to the destination state file to write to. If this - isn't specified, the source state file will be used. This - can be a new or existing path. + -state-out=PATH Path to the destination state file to write to. 
If + this isn't specified, the source state file will be + used. This can be a new or existing path. + + -ignore-remote-version Continue even if remote and local Terraform versions + are incompatible. This may result in an unusable + workspace, and should be used with extreme caution. ` return strings.TrimSpace(helpText) diff --git a/command/state_mv_test.go b/command/state_mv_test.go index 7a631c44f..1685d01a2 100644 --- a/command/state_mv_test.go +++ b/command/state_mv_test.go @@ -7,12 +7,19 @@ import ( "strings" "testing" + "github.com/google/go-cmp/cmp" "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/states" ) +var disabledColorize = &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, +} + func TestStateMv(t *testing.T) { state := states.BuildState(func(s *states.SyncState) { s.SetResourceInstanceCurrent( @@ -148,6 +155,7 @@ func TestStateMv(t *testing.T) { } func TestStateMv_resourceToInstance(t *testing.T) { + // A single resource (no count defined) state := states.BuildState(func(s *states.SyncState) { s.SetResourceInstanceCurrent( addrs.Resource{ @@ -236,6 +244,141 @@ test_instance.baz: testStateOutput(t, backups[0], testStateMvOutputOriginal) } +func TestStateMv_resourceToInstanceErr(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceProvider( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }.Absolute(addrs.RootModuleInstance), + 
addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + ui := cli.NewMockUi() + + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + }, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + "test_instance.bar[0]", + } + + if code := c.Run(args); code == 0 { + t.Fatalf("expected error output, got:\n%s", ui.OutputWriter.String()) + } + + expectedErr := ` +Error: Invalid target address + +Cannot move test_instance.foo to test_instance.bar[0]: the source is a whole +resource (not a resource instance) so the target must also be a whole +resource. + +` + errOutput := ui.ErrorWriter.String() + if errOutput != expectedErr { + t.Errorf("wrong output\n%s", cmp.Diff(errOutput, expectedErr)) + } +} + +func TestStateMv_resourceToInstanceErrInAutomation(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.IntKey(0)).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + s.SetResourceProvider( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "bar", + }.Absolute(addrs.RootModuleInstance), + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + RunningInAutomation: true, + }, + }, + } + + args := 
[]string{ + "-state", statePath, + "test_instance.foo", + "test_instance.bar[0]", + } + + if code := c.Run(args); code == 0 { + t.Fatalf("expected error output, got:\n%s", ui.OutputWriter.String()) + } + + expectedErr := ` +Error: Invalid target address + +Cannot move test_instance.foo to test_instance.bar[0]: the source is a whole +resource (not a resource instance) so the target must also be a whole +resource. + +` + errOutput := ui.ErrorWriter.String() + if errOutput != expectedErr { + t.Errorf("Unexpected diff.\ngot:\n%s\nwant:\n%s\n", errOutput, expectedErr) + t.Errorf("%s", cmp.Diff(errOutput, expectedErr)) + } +} + func TestStateMv_instanceToResource(t *testing.T) { state := states.BuildState(func(s *states.SyncState) { s.SetResourceInstanceCurrent( @@ -435,8 +578,16 @@ func TestStateMv_differentResourceTypes(t *testing.T) { t.Fatalf("expected error output, got:\n%s", ui.OutputWriter.String()) } - if !strings.Contains(ui.ErrorWriter.String(), "resource types don't match") { - t.Fatalf("expected initialization error, got:\n%s", ui.ErrorWriter.String()) + gotErr := ui.ErrorWriter.String() + wantErr := ` +Error: Invalid state move request + +Cannot move test_instance.foo to test_network.bar: resource types don't +match. 
+ +` + if gotErr != wantErr { + t.Fatalf("expected initialization error\ngot:\n%s\n\nwant:%s", gotErr, wantErr) } } @@ -1243,6 +1394,13 @@ func TestStateMv_onlyResourceInModule(t *testing.T) { testStateOutput(t, backups[0], testStateMvOnlyResourceInModule_original) } +func TestStateMvHelp(t *testing.T) { + c := &StateMvCommand{} + if strings.ContainsRune(c.Help(), '\t') { + t.Fatal("help text contains tab character, which will result in poor formatting") + } +} + const testStateMvOutputOriginal = ` test_instance.baz: ID = foo diff --git a/command/state_pull.go b/command/state_pull.go index 6ab6328a2..f2f368348 100644 --- a/command/state_pull.go +++ b/command/state_pull.go @@ -22,7 +22,6 @@ func (c *StatePullCommand) Run(args []string) int { c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) return 1 } - args = cmdFlags.Args() // Load the backend b, backendDiags := c.Backend(nil) @@ -31,6 +30,9 @@ func (c *StatePullCommand) Run(args []string) int { return 1 } + // This is a read-only command + c.ignoreRemoteBackendVersionConflict(b) + // Get the state manager for the current workspace env, err := c.Workspace() if err != nil { @@ -68,9 +70,13 @@ func (c *StatePullCommand) Help() string { helpText := ` Usage: terraform state pull [options] - Pull the state from its location and output it to stdout. + Pull the state from its location, upgrade the local copy, and output it + to stdout. This command "pulls" the current state and outputs it to stdout. + As part of this process, Terraform will upgrade the state format of the + local copy to the current version. + The primary use of this is for state stored remotely. This command will still work with local state but is less useful for this. 
diff --git a/command/state_push.go b/command/state_push.go index facbf786a..66bfbdaf7 100644 --- a/command/state_push.go +++ b/command/state_push.go @@ -22,7 +22,7 @@ type StatePushCommand struct { func (c *StatePushCommand) Run(args []string) int { args = c.Meta.process(args) var flagForce bool - cmdFlags := c.Meta.defaultFlagSet("state push") + cmdFlags := c.Meta.ignoreRemoteVersionFlagSet("state push") cmdFlags.BoolVar(&flagForce, "force", false, "") cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") @@ -71,13 +71,22 @@ func (c *StatePushCommand) Run(args []string) int { return 1 } - // Get the state manager for the currently-selected workspace - env, err := c.Workspace() + // Determine the workspace name + workspace, err := c.Workspace() if err != nil { c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err)) return 1 } - stateMgr, err := b.StateMgr(env) + + // Check remote Terraform version is compatible + remoteVersionDiags := c.remoteBackendVersionCheck(b, workspace) + c.showDiagnostics(remoteVersionDiags) + if remoteVersionDiags.HasErrors() { + return 1 + } + + // Get the state manager for the currently-selected workspace + stateMgr, err := b.StateMgr(workspace) if err != nil { c.Ui.Error(fmt.Sprintf("Failed to load destination state: %s", err)) return 1 diff --git a/command/state_push_test.go b/command/state_push_test.go index b0858469d..c04ed24bf 100644 --- a/command/state_push_test.go +++ b/command/state_push_test.go @@ -110,7 +110,7 @@ func TestStatePush_replaceMatchStdin(t *testing.T) { expected := testStateRead(t, "replace.tfstate") - // Setup the replacement to come from stdin + // Set up the replacement to come from stdin var buf bytes.Buffer if err := writeStateForTesting(expected, &buf); err != nil { t.Fatalf("err: %s", err) diff --git a/command/state_replace_provider.go b/command/state_replace_provider.go index 3d5acf678..750a90582 100644 --- 
a/command/state_replace_provider.go +++ b/command/state_replace_provider.go @@ -25,7 +25,7 @@ func (c *StateReplaceProviderCommand) Run(args []string) int { args = c.Meta.process(args) var autoApprove bool - cmdFlags := c.Meta.defaultFlagSet("state replace-provider") + cmdFlags := c.Meta.ignoreRemoteVersionFlagSet("state replace-provider") cmdFlags.BoolVar(&autoApprove, "auto-approve", false, "skip interactive approval of replacements") cmdFlags.StringVar(&c.backupPath, "backup", "-", "backup") cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock states") @@ -90,7 +90,7 @@ func (c *StateReplaceProviderCommand) Run(args []string) int { state := stateMgr.State() if state == nil { - c.Ui.Error(fmt.Sprintf(errStateNotFound)) + c.Ui.Error(errStateNotFound) return 1 } @@ -119,7 +119,7 @@ func (c *StateReplaceProviderCommand) Run(args []string) int { // Explain the changes colorize := c.Colorize() c.Ui.Output("Terraform will perform the following actions:\n") - c.Ui.Output(colorize.Color(fmt.Sprintf(" [yellow]~[reset] Updating provider:"))) + c.Ui.Output(colorize.Color(" [yellow]~[reset] Updating provider:")) c.Ui.Output(colorize.Color(fmt.Sprintf(" [red]-[reset] %s", from))) c.Ui.Output(colorize.Color(fmt.Sprintf(" [green]+[reset] %s\n", to))) @@ -134,7 +134,7 @@ func (c *StateReplaceProviderCommand) Run(args []string) int { "\n[bold]Do you want to make these changes?[reset]\n" + "Only 'yes' will be accepted to continue.\n", )) - v, err := c.Ui.Ask(fmt.Sprintf("Enter a value:")) + v, err := c.Ui.Ask("Enter a value:") if err != nil { c.Ui.Error(fmt.Sprintf("Error asking for approval: %s", err)) return 1 @@ -172,19 +172,24 @@ Usage: terraform state replace-provider [options] FROM_PROVIDER_FQN TO_PROVIDER_ Options: - -auto-approve Skip interactive approval. + -auto-approve Skip interactive approval. - -backup=PATH Path where Terraform should write the backup for the - state file. This can't be disabled. 
If not set, Terraform - will write it to the same path as the state file with - a ".backup" extension. + -backup=PATH Path where Terraform should write the backup for the + state file. This can't be disabled. If not set, + Terraform will write it to the same path as the state + file with a ".backup" extension. - -lock=true Lock the state files when locking is supported. + -lock=true Lock the state files when locking is supported. - -lock-timeout=0s Duration to retry a state lock. + -lock-timeout=0s Duration to retry a state lock. + + -state=PATH Path to the state file to update. Defaults to the + configured backend, or "terraform.tfstate" + + -ignore-remote-version Continue even if remote and local Terraform versions + are incompatible. This may result in an unusable + workspace, and should be used with extreme caution. - -state=PATH Path to the state file to update. Defaults to the configured - backend, or "terraform.tfstate" ` return strings.TrimSpace(helpText) } diff --git a/command/state_rm.go b/command/state_rm.go index 1254de417..14160bece 100644 --- a/command/state_rm.go +++ b/command/state_rm.go @@ -19,7 +19,7 @@ type StateRmCommand struct { func (c *StateRmCommand) Run(args []string) int { args = c.Meta.process(args) var dryRun bool - cmdFlags := c.Meta.defaultFlagSet("state rm") + cmdFlags := c.Meta.ignoreRemoteVersionFlagSet("state rm") cmdFlags.BoolVar(&dryRun, "dry-run", false, "dry run") cmdFlags.StringVar(&c.backupPath, "backup", "-", "backup") cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") @@ -59,7 +59,7 @@ func (c *StateRmCommand) Run(args []string) int { state := stateMgr.State() if state == nil { - c.Ui.Error(fmt.Sprintf(errStateNotFound)) + c.Ui.Error(errStateNotFound) return 1 } @@ -146,18 +146,22 @@ Usage: terraform state rm [options] ADDRESS... Options: - -dry-run If set, prints out what would've been removed but - doesn't actually remove anything. 
+ -dry-run If set, prints out what would've been removed but + doesn't actually remove anything. - -backup=PATH Path where Terraform should write the backup - state. + -backup=PATH Path where Terraform should write the backup + state. - -lock=true Lock the state file when locking is supported. + -lock=true Lock the state file when locking is supported. - -lock-timeout=0s Duration to retry a state lock. + -lock-timeout=0s Duration to retry a state lock. - -state=PATH Path to the state file to update. Defaults to the current - workspace state. + -state=PATH Path to the state file to update. Defaults to the + current workspace state. + + -ignore-remote-version Continue even if remote and local Terraform versions + are incompatible. This may result in an unusable + workspace, and should be used with extreme caution. ` return strings.TrimSpace(helpText) diff --git a/command/state_show.go b/command/state_show.go index dd6e292bc..04569d673 100644 --- a/command/state_show.go +++ b/command/state_show.go @@ -53,6 +53,9 @@ func (c *StateShowCommand) Run(args []string) int { return 1 } + // This is a read-only command + c.ignoreRemoteBackendVersionConflict(b) + // Check if the address can be parsed addr, addrDiags := addrs.ParseAbsResourceInstanceStr(args[0]) if addrDiags.HasErrors() { @@ -106,7 +109,7 @@ func (c *StateShowCommand) Run(args []string) int { state := stateMgr.State() if state == nil { - c.Ui.Error(fmt.Sprintf(errStateNotFound)) + c.Ui.Error(errStateNotFound) return 1 } diff --git a/command/state_show_test.go b/command/state_show_test.go index 0f649cb78..ad70cd5e7 100644 --- a/command/state_show_test.go +++ b/command/state_show_test.go @@ -8,7 +8,6 @@ import ( "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/providers" "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/cli" "github.com/zclconf/go-cty/cty" ) @@ -34,13 +33,15 @@ func TestStateShow(t *testing.T) { statePath 
:= testStateFile(t, state) p := testProvider() - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - "bar": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -107,13 +108,15 @@ func TestStateShow_multi(t *testing.T) { statePath := testStateFile(t, state) p := testProvider() - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - "bar": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -213,13 +216,15 @@ func TestStateShow_configured_provider(t *testing.T) { statePath := testStateFile(t, state) p := testProvider() - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true, Computed: true}, - "foo": {Type: cty.String, Optional: true}, - 
"bar": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, }, }, }, diff --git a/command/taint.go b/command/taint.go index ad4f10f06..443831112 100644 --- a/command/taint.go +++ b/command/taint.go @@ -21,14 +21,12 @@ type TaintCommand struct { func (c *TaintCommand) Run(args []string) int { args = c.Meta.process(args) - var module string var allowMissing bool - cmdFlags := c.Meta.defaultFlagSet("taint") - cmdFlags.BoolVar(&allowMissing, "allow-missing", false, "module") + cmdFlags := c.Meta.ignoreRemoteVersionFlagSet("taint") + cmdFlags.BoolVar(&allowMissing, "allow-missing", false, "allow missing") cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path") cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") - cmdFlags.StringVar(&module, "module", "", "module") cmdFlags.StringVar(&c.Meta.statePath, "state", "", "path") cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } @@ -47,11 +45,6 @@ func (c *TaintCommand) Run(args []string) int { return 1 } - if module != "" { - c.Ui.Error("The -module option is no longer used. 
Instead, include the module path in the main resource address, like \"module.foo.module.bar.null_resource.baz\".") - return 1 - } - addr, addrDiags := addrs.ParseAbsResourceInstanceStr(args[0]) diags = diags.Append(addrDiags) if addrDiags.HasErrors() { @@ -100,13 +93,23 @@ func (c *TaintCommand) Run(args []string) int { return 1 } - // Get the state - env, err := c.Workspace() + // Determine the workspace name + workspace, err := c.Workspace() if err != nil { c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err)) return 1 } - stateMgr, err := b.StateMgr(env) + + // Check remote Terraform version is compatible + remoteVersionDiags := c.remoteBackendVersionCheck(b, workspace) + diags = diags.Append(remoteVersionDiags) + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } + + // Get the state + stateMgr, err := b.StateMgr(workspace) if err != nil { c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) return 1 @@ -224,22 +227,26 @@ Usage: terraform taint [options]
Options: - -allow-missing If specified, the command will succeed (exit code 0) - even if the resource is missing. + -allow-missing If specified, the command will succeed (exit code 0) + even if the resource is missing. - -backup=path Path to backup the existing state file before - modifying. Defaults to the "-state-out" path with - ".backup" extension. Set to "-" to disable backup. + -backup=path Path to backup the existing state file before + modifying. Defaults to the "-state-out" path with + ".backup" extension. Set to "-" to disable backup. - -lock=true Lock the state file when locking is supported. + -lock=true Lock the state file when locking is supported. - -lock-timeout=0s Duration to retry a state lock. + -lock-timeout=0s Duration to retry a state lock. - -state=path Path to read and save state (unless state-out - is specified). Defaults to "terraform.tfstate". + -state=path Path to read and save state (unless state-out + is specified). Defaults to "terraform.tfstate". - -state-out=path Path to write updated state file. By default, the - "-state" path will be used. + -state-out=path Path to write updated state file. By default, the + "-state" path will be used. + + -ignore-remote-version Continue even if remote and local Terraform versions + are incompatible. This may result in an unusable + workspace, and should be used with extreme caution. 
` return strings.TrimSpace(helpText) @@ -253,7 +260,7 @@ func (c *TaintCommand) allowMissingExit(name addrs.AbsResourceInstance) int { c.showDiagnostics(tfdiags.Sourceless( tfdiags.Warning, "No such resource instance", - "Resource instance %s was not found, but this is not an error because -allow-missing was set.", + fmt.Sprintf("Resource instance %s was not found, but this is not an error because -allow-missing was set.", name), )) return 0 } diff --git a/command/taint_test.go b/command/taint_test.go index e09a0c336..2e91a88b7 100644 --- a/command/taint_test.go +++ b/command/taint_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/google/go-cmp/cmp" "github.com/mitchellh/cli" "github.com/hashicorp/terraform/addrs" @@ -357,6 +358,19 @@ func TestTaint_missingAllow(t *testing.T) { if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } + + // Check for the warning + actual := strings.TrimSpace(ui.ErrorWriter.String()) + expected := strings.TrimSpace(` +Warning: No such resource instance + +Resource instance test_instance.bar was not found, but this is not an error +because -allow-missing was set. 
+ +`) + if diff := cmp.Diff(expected, actual); diff != "" { + t.Fatalf("wrong output\n%s", diff) + } } func TestTaint_stateOut(t *testing.T) { diff --git a/command/testdata/013upgrade-implicit-not-found/expected/versions.tf b/command/testdata/013upgrade-implicit-not-found/expected/versions.tf index 795fe47ef..a95ff090f 100644 --- a/command/testdata/013upgrade-implicit-not-found/expected/versions.tf +++ b/command/testdata/013upgrade-implicit-not-found/expected/versions.tf @@ -10,7 +10,7 @@ terraform { # # For more information, see the provider source documentation: # - # https://www.terraform.io/docs/configuration/providers.html#provider-source + # https://www.terraform.io/docs/language/providers/requirements.html } } required_version = ">= 0.13" diff --git a/command/testdata/013upgrade-provider-not-found/expected/main.tf b/command/testdata/013upgrade-provider-not-found/expected/main.tf index 951db47d7..e3aa388c8 100644 --- a/command/testdata/013upgrade-provider-not-found/expected/main.tf +++ b/command/testdata/013upgrade-provider-not-found/expected/main.tf @@ -16,7 +16,7 @@ terraform { # # For more information, see the provider source documentation: # - # https://www.terraform.io/docs/configuration/providers.html#provider-source + # https://www.terraform.io/docs/language/providers/requirements.html version = "~> 2.0.0" } foo = { diff --git a/command/testdata/apply-destroy-targeted/main.tf b/command/testdata/apply-destroy-targeted/main.tf index 45ebc5b97..0f249b384 100644 --- a/command/testdata/apply-destroy-targeted/main.tf +++ b/command/testdata/apply-destroy-targeted/main.tf @@ -3,5 +3,5 @@ resource "test_instance" "foo" { } resource "test_load_balancer" "foo" { - instances = ["${test_instance.foo.*.id}"] + instances = test_instance.foo.*.id } diff --git a/command/testdata/apply-targeted/main.tf b/command/testdata/apply-targeted/main.tf new file mode 100644 index 000000000..1b6c42450 --- /dev/null +++ b/command/testdata/apply-targeted/main.tf @@ -0,0 +1,9 @@ 
+resource "test_instance" "foo" { + count = 2 +} + +resource "test_instance" "bar" { +} + +resource "test_instance" "baz" { +} diff --git a/command/testdata/fmt/general_in.tf b/command/testdata/fmt/general_in.tf index 44432f973..0ee143731 100644 --- a/command/testdata/fmt/general_in.tf +++ b/command/testdata/fmt/general_in.tf @@ -39,3 +39,6 @@ resource "foo_instance" /* ... */ "baz" { thingy = "${var.instance_type}" } } + + provider "" { +} diff --git a/command/testdata/fmt/general_out.tf b/command/testdata/fmt/general_out.tf index 4b1bc489b..974646ebd 100644 --- a/command/testdata/fmt/general_out.tf +++ b/command/testdata/fmt/general_out.tf @@ -39,3 +39,6 @@ resource "foo_instance" "baz" { thingy = var.instance_type } } + +provider "" { +} diff --git a/command/testdata/refresh-targeted/main.tf b/command/testdata/refresh-targeted/main.tf new file mode 100644 index 000000000..734f58549 --- /dev/null +++ b/command/testdata/refresh-targeted/main.tf @@ -0,0 +1,7 @@ +resource "test_instance" "foo" { + id = "foo" +} + +resource "test_instance" "bar" { + id = "bar" +} diff --git a/command/testdata/show-json-state/sensitive-variables/output.json b/command/testdata/show-json-state/sensitive-variables/output.json new file mode 100644 index 000000000..a4e74aa37 --- /dev/null +++ b/command/testdata/show-json-state/sensitive-variables/output.json @@ -0,0 +1,22 @@ +{ + "format_version": "0.1", + "terraform_version": "0.14.0", + "values": { + "root_module": { + "resources": [ + { + "address": "test_instance.test", + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider_name": "registry.terraform.io/hashicorp/test", + "schema_version": 0, + "values": { + "id": "621124146446964903", + "ami": "abc" + } + } + ] + } + } +} diff --git a/command/testdata/show-json-state/sensitive-variables/terraform.tfstate b/command/testdata/show-json-state/sensitive-variables/terraform.tfstate new file mode 100644 index 000000000..55712452a --- /dev/null +++ 
b/command/testdata/show-json-state/sensitive-variables/terraform.tfstate @@ -0,0 +1,33 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 1, + "lineage": "d7a6880b-6875-288f-13a9-696a65c73036", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": "test_instance", + "name": "test", + "provider": "provider[\"registry.terraform.io/hashicorp/test\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "id": "621124146446964903", + "ami": "abc" + }, + "sensitive_attributes": [ + [ + { + "type": "get_attr", + "value": "ami" + } + ] + ], + "private": "bnVsbA==" + } + ] + } + ] +} diff --git a/command/testdata/state-list-nested-modules/terraform.tfstate b/command/testdata/state-list-nested-modules/terraform.tfstate new file mode 100644 index 000000000..3e4689a82 --- /dev/null +++ b/command/testdata/state-list-nested-modules/terraform.tfstate @@ -0,0 +1,91 @@ +{ + "version": 4, + "terraform_version": "0.15.0", + "serial": 8, + "lineage": "00bfda35-ad61-ec8d-c013-14b0320bc416", + "resources": [ + { + "mode": "managed", + "type": "test_instance", + "name": "root", + "provider": "provider[\"registry.terraform.io/hashicorp/test\"]", + "instances": [ + { + "attributes": { + "id": "root" + } + } + ] + }, + { + "module": "module.nest", + "mode": "managed", + "type": "test_instance", + "name": "nest", + "provider": "provider[\"registry.terraform.io/hashicorp/test\"]", + "instances": [ + { + "attributes": { + "ami": "nested" + } + } + ] + }, + { + "module": "module.nest.module.subnest", + "mode": "managed", + "type": "test_instance", + "name": "subnest", + "provider": "provider[\"registry.terraform.io/hashicorp/test\"]", + "instances": [ + { + "attributes": { + "id": "subnested" + } + } + ] + }, + { + "module": "module.nonexist.module.child", + "mode": "managed", + "type": "test_instance", + "name": "child", + "provider": "provider[\"registry.terraform.io/hashicorp/test\"]", + "instances": [ + { + "attributes": { + "id": "child" + } + } + 
] + }, + { + "module": "module.count[0]", + "mode": "managed", + "type": "test_instance", + "name": "count", + "provider": "provider[\"registry.terraform.io/hashicorp/test\"]", + "instances": [ + { + "attributes": { + "id": "zero" + } + } + ] + }, + { + "module": "module.count[1]", + "mode": "managed", + "type": "test_instance", + "name": "count", + "provider": "provider[\"registry.terraform.io/hashicorp/test\"]", + "instances": [ + { + "attributes": { + "id": "one" + } + } + ] + } + ] +} diff --git a/command/testdata/validate-invalid/incorrectmodulename/output.json b/command/testdata/validate-invalid/incorrectmodulename/output.json new file mode 100644 index 000000000..6fb8b2cad --- /dev/null +++ b/command/testdata/validate-invalid/incorrectmodulename/output.json @@ -0,0 +1,133 @@ +{ + "valid": false, + "error_count": 6, + "warning_count": 1, + "diagnostics": [ + { + "severity": "error", + "summary": "Missing required argument", + "detail": "The argument \"source\" is required, but no definition was found.", + "range": { + "filename": "testdata/validate-invalid/incorrectmodulename/main.tf", + "start": { + "line": 1, + "column": 23, + "byte": 22 + }, + "end": { + "line": 1, + "column": 23, + "byte": 22 + } + } + }, + { + "severity": "error", + "summary": "Invalid module instance name", + "detail": "A name must start with a letter or underscore and may contain only letters, digits, underscores, and dashes.", + "range": { + "filename": "testdata/validate-invalid/incorrectmodulename/main.tf", + "start": { + "line": 1, + "column": 8, + "byte": 7 + }, + "end": { + "line": 1, + "column": 22, + "byte": 21 + } + } + }, + { + "severity": "warning", + "summary": "Interpolation-only expressions are deprecated", + "detail": "Terraform 0.11 and earlier required all non-constant expressions to be provided via interpolation syntax, but this pattern is now deprecated. 
To silence this warning, remove the \"${ sequence from the start and the }\" sequence from the end of this expression, leaving just the inner expression.\n\nTemplate interpolation syntax is still used to construct strings from expressions when the template includes multiple interpolation sequences or a mixture of literal strings and interpolations. This deprecation applies only to templates that consist entirely of a single interpolation sequence.", + "range": { + "filename": "testdata/validate-invalid/incorrectmodulename/main.tf", + "start": { + "line": 5, + "column": 12, + "byte": 55 + }, + "end": { + "line": 5, + "column": 31, + "byte": 74 + } + } + }, + { + "severity": "error", + "summary": "Variables not allowed", + "detail": "Variables may not be used here.", + "range": { + "filename": "testdata/validate-invalid/incorrectmodulename/main.tf", + "start": { + "line": 5, + "column": 15, + "byte": 58 + }, + "end": { + "line": 5, + "column": 18, + "byte": 61 + } + } + }, + { + "severity": "error", + "summary": "Unsuitable value type", + "detail": "Unsuitable value: value must be known", + "range": { + "filename": "testdata/validate-invalid/incorrectmodulename/main.tf", + "start": { + "line": 5, + "column": 12, + "byte": 55 + }, + "end": { + "line": 5, + "column": 31, + "byte": 74 + } + } + }, + { + "severity": "error", + "summary": "Module not installed", + "detail": "This module is not yet installed. Run \"terraform init\" to install all modules required by this configuration.", + "range": { + "filename": "testdata/validate-invalid/incorrectmodulename/main.tf", + "start": { + "line": 4, + "column": 1, + "byte": 27 + }, + "end": { + "line": 4, + "column": 15, + "byte": 41 + } + } + }, + { + "severity": "error", + "summary": "Module not installed", + "detail": "This module is not yet installed. 
Run \"terraform init\" to install all modules required by this configuration.", + "range": { + "filename": "testdata/validate-invalid/incorrectmodulename/main.tf", + "start": { + "line": 1, + "column": 1, + "byte": 0 + }, + "end": { + "line": 1, + "column": 22, + "byte": 21 + } + } + } + ] +} diff --git a/command/testdata/validate-invalid/interpolation/output.json b/command/testdata/validate-invalid/interpolation/output.json new file mode 100644 index 000000000..01c7815e9 --- /dev/null +++ b/command/testdata/validate-invalid/interpolation/output.json @@ -0,0 +1,43 @@ +{ + "valid": false, + "error_count": 2, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + "summary": "Variables not allowed", + "detail": "Variables may not be used here.", + "range": { + "filename": "testdata/validate-invalid/interpolation/main.tf", + "start": { + "line": 6, + "column": 16, + "byte": 122 + }, + "end": { + "line": 6, + "column": 19, + "byte": 125 + } + } + }, + { + "severity": "error", + "summary": "Invalid expression", + "detail": "A single static variable reference is required: only attribute access and indexing with constant keys. 
No calculations, function calls, template expressions, etc are allowed here.", + "range": { + "filename": "testdata/validate-invalid/interpolation/main.tf", + "start": { + "line": 10, + "column": 17, + "byte": 197 + }, + "end": { + "line": 10, + "column": 44, + "byte": 224 + } + } + } + ] +} diff --git a/command/testdata/validate-invalid/missing_defined_var/output.json b/command/testdata/validate-invalid/missing_defined_var/output.json new file mode 100644 index 000000000..ac2d30361 --- /dev/null +++ b/command/testdata/validate-invalid/missing_defined_var/output.json @@ -0,0 +1,6 @@ +{ + "valid": true, + "error_count": 0, + "warning_count": 0, + "diagnostics": [] +} diff --git a/command/testdata/validate-invalid/missing_quote/output.json b/command/testdata/validate-invalid/missing_quote/output.json new file mode 100644 index 000000000..0d1abbbbc --- /dev/null +++ b/command/testdata/validate-invalid/missing_quote/output.json @@ -0,0 +1,25 @@ +{ + "valid": false, + "error_count": 1, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + "summary": "Invalid reference", + "detail": "A reference to a resource type must be followed by at least one attribute access, specifying the resource name.", + "range": { + "filename": "testdata/validate-invalid/missing_quote/main.tf", + "start": { + "line": 6, + "column": 14, + "byte": 110 + }, + "end": { + "line": 6, + "column": 18, + "byte": 114 + } + } + } + ] +} diff --git a/command/testdata/validate-invalid/missing_var/output.json b/command/testdata/validate-invalid/missing_var/output.json new file mode 100644 index 000000000..677e5f634 --- /dev/null +++ b/command/testdata/validate-invalid/missing_var/output.json @@ -0,0 +1,43 @@ +{ + "valid": false, + "error_count": 1, + "warning_count": 1, + "diagnostics": [ + { + "severity": "warning", + "summary": "Interpolation-only expressions are deprecated", + "detail": "Terraform 0.11 and earlier required all non-constant expressions to be provided via interpolation 
syntax, but this pattern is now deprecated. To silence this warning, remove the \"${ sequence from the start and the }\" sequence from the end of this expression, leaving just the inner expression.\n\nTemplate interpolation syntax is still used to construct strings from expressions when the template includes multiple interpolation sequences or a mixture of literal strings and interpolations. This deprecation applies only to templates that consist entirely of a single interpolation sequence.", + "range": { + "filename": "testdata/validate-invalid/missing_var/main.tf", + "start": { + "line": 6, + "column": 21, + "byte": 117 + }, + "end": { + "line": 6, + "column": 41, + "byte": 137 + } + } + }, + { + "severity": "error", + "summary": "Reference to undeclared input variable", + "detail": "An input variable with the name \"description\" has not been declared. This variable can be declared with a variable \"description\" {} block.", + "range": { + "filename": "testdata/validate-invalid/missing_var/main.tf", + "start": { + "line": 6, + "column": 24, + "byte": 120 + }, + "end": { + "line": 6, + "column": 39, + "byte": 135 + } + } + } + ] +} diff --git a/command/testdata/validate-invalid/multiple_modules/output.json b/command/testdata/validate-invalid/multiple_modules/output.json new file mode 100644 index 000000000..b6ece4bf3 --- /dev/null +++ b/command/testdata/validate-invalid/multiple_modules/output.json @@ -0,0 +1,43 @@ +{ + "valid": false, + "error_count": 2, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + "summary": "Duplicate module call", + "detail": "A module call named \"multi_module\" was already defined at testdata/validate-invalid/multiple_modules/main.tf:1,1-22. 
Module calls must have unique names within a module.", + "range": { + "filename": "testdata/validate-invalid/multiple_modules/main.tf", + "start": { + "line": 5, + "column": 1, + "byte": 46 + }, + "end": { + "line": 5, + "column": 22, + "byte": 67 + } + } + }, + { + "severity": "error", + "summary": "Module not installed", + "detail": "This module is not yet installed. Run \"terraform init\" to install all modules required by this configuration.", + "range": { + "filename": "testdata/validate-invalid/multiple_modules/main.tf", + "start": { + "line": 5, + "column": 1, + "byte": 46 + }, + "end": { + "line": 5, + "column": 22, + "byte": 67 + } + } + } + ] +} diff --git a/command/testdata/validate-invalid/multiple_providers/output.json b/command/testdata/validate-invalid/multiple_providers/output.json new file mode 100644 index 000000000..836882790 --- /dev/null +++ b/command/testdata/validate-invalid/multiple_providers/output.json @@ -0,0 +1,25 @@ +{ + "valid": false, + "error_count": 1, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + "summary": "Duplicate provider configuration", + "detail": "A default (non-aliased) provider configuration for \"aws\" was already given at testdata/validate-invalid/multiple_providers/main.tf:1,1-15. 
If multiple configurations are required, set the \"alias\" argument for alternative configurations.", + "range": { + "filename": "testdata/validate-invalid/multiple_providers/main.tf", + "start": { + "line": 7, + "column": 1, + "byte": 85 + }, + "end": { + "line": 7, + "column": 15, + "byte": 99 + } + } + } + ] +} diff --git a/command/testdata/validate-invalid/multiple_resources/output.json b/command/testdata/validate-invalid/multiple_resources/output.json new file mode 100644 index 000000000..f1f0a8b52 --- /dev/null +++ b/command/testdata/validate-invalid/multiple_resources/output.json @@ -0,0 +1,25 @@ +{ + "valid": false, + "error_count": 1, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + "summary": "Duplicate resource \"aws_instance\" configuration", + "detail": "A aws_instance resource named \"web\" was already declared at testdata/validate-invalid/multiple_resources/main.tf:1,1-30. Resource names must be unique per type in each module.", + "range": { + "filename": "testdata/validate-invalid/multiple_resources/main.tf", + "start": { + "line": 4, + "column": 1, + "byte": 35 + }, + "end": { + "line": 4, + "column": 30, + "byte": 64 + } + } + } + ] +} diff --git a/command/testdata/validate-invalid/output.json b/command/testdata/validate-invalid/output.json new file mode 100644 index 000000000..3eebf7634 --- /dev/null +++ b/command/testdata/validate-invalid/output.json @@ -0,0 +1,25 @@ +{ + "valid": false, + "error_count": 1, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + "summary": "Unsupported block type", + "detail": "Blocks of type \"resorce\" are not expected here. 
Did you mean \"resource\"?", + "range": { + "filename": "testdata/validate-invalid/main.tf", + "start": { + "line": 1, + "column": 1, + "byte": 0 + }, + "end": { + "line": 1, + "column": 8, + "byte": 7 + } + } + } + ] +} diff --git a/command/testdata/validate-invalid/outputs/output.json b/command/testdata/validate-invalid/outputs/output.json new file mode 100644 index 000000000..454ad3cac --- /dev/null +++ b/command/testdata/validate-invalid/outputs/output.json @@ -0,0 +1,43 @@ +{ + "valid": false, + "error_count": 2, + "warning_count": 0, + "diagnostics": [ + { + "severity": "error", + "summary": "Missing required argument", + "detail": "The argument \"value\" is required, but no definition was found.", + "range": { + "filename": "testdata/validate-invalid/outputs/main.tf", + "start": { + "line": 1, + "column": 18, + "byte": 17 + }, + "end": { + "line": 1, + "column": 18, + "byte": 17 + } + } + }, + { + "severity": "error", + "summary": "Unsupported argument", + "detail": "An argument named \"values\" is not expected here. 
Did you mean \"value\"?", + "range": { + "filename": "testdata/validate-invalid/outputs/main.tf", + "start": { + "line": 2, + "column": 3, + "byte": 21 + }, + "end": { + "line": 2, + "column": 9, + "byte": 27 + } + } + } + ] +} diff --git a/command/testdata/validate-valid/output.json b/command/testdata/validate-valid/output.json new file mode 100644 index 000000000..ac2d30361 --- /dev/null +++ b/command/testdata/validate-valid/output.json @@ -0,0 +1,6 @@ +{ + "valid": true, + "error_count": 0, + "warning_count": 0, + "diagnostics": [] +} diff --git a/command/unlock.go b/command/unlock.go index 6376fd0f6..fd4b4f36b 100644 --- a/command/unlock.go +++ b/command/unlock.go @@ -30,8 +30,8 @@ func (c *UnlockCommand) Run(args []string) int { } args = cmdFlags.Args() - if len(args) == 0 { - c.Ui.Error("unlock requires a lock id argument") + if len(args) != 1 { + c.Ui.Error("Expected a single argument: LOCK_ID") return cli.RunResultHelp } @@ -116,7 +116,7 @@ func (c *UnlockCommand) Run(args []string) int { func (c *UnlockCommand) Help() string { helpText := ` -Usage: terraform force-unlock LOCK_ID [DIR] +Usage: terraform force-unlock LOCK_ID Manually unlock the state for the defined configuration. 
diff --git a/command/unlock_test.go b/command/unlock_test.go index 66b4d3e8c..70f14c65a 100644 --- a/command/unlock_test.go +++ b/command/unlock_test.go @@ -5,8 +5,9 @@ import ( "testing" "github.com/hashicorp/terraform/backend/remote-state/inmem" - "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/cli" + + legacy "github.com/hashicorp/terraform/internal/legacy/terraform" ) // Since we can't unlock a local state file, just test that calling unlock @@ -24,7 +25,7 @@ func TestUnlock(t *testing.T) { if err != nil { t.Fatalf("err: %s", err) } - err = terraform.WriteState(terraform.NewState(), f) + err = legacy.WriteState(legacy.NewState(), f) f.Close() if err != nil { t.Fatalf("err: %s", err) @@ -55,7 +56,7 @@ func TestUnlock(t *testing.T) { "-force", } - if code := c.Run(args); code != 1 { + if code := c.Run(args); code != cli.RunResultHelp { t.Fatalf("bad: %d\n%s\n%s", code, ui.OutputWriter.String(), ui.ErrorWriter.String()) } } diff --git a/command/untaint.go b/command/untaint.go index 96493bcb0..bbc93294d 100644 --- a/command/untaint.go +++ b/command/untaint.go @@ -19,14 +19,12 @@ type UntaintCommand struct { func (c *UntaintCommand) Run(args []string) int { args = c.Meta.process(args) - var module string var allowMissing bool - cmdFlags := c.Meta.defaultFlagSet("untaint") - cmdFlags.BoolVar(&allowMissing, "allow-missing", false, "module") + cmdFlags := c.Meta.ignoreRemoteVersionFlagSet("untaint") + cmdFlags.BoolVar(&allowMissing, "allow-missing", false, "allow missing") cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path") cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") - cmdFlags.StringVar(&module, "module", "", "module") cmdFlags.StringVar(&c.Meta.statePath, "state", "", "path") cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } @@ -45,11 +43,6 @@ func (c *UntaintCommand) Run(args 
[]string) int { return 1 } - if module != "" { - c.Ui.Error("The -module option is no longer used. Instead, include the module path in the main resource address, like \"module.foo.module.bar.null_resource.baz\".") - return 1 - } - addr, addrDiags := addrs.ParseAbsResourceInstanceStr(args[0]) diags = diags.Append(addrDiags) if addrDiags.HasErrors() { @@ -65,12 +58,22 @@ func (c *UntaintCommand) Run(args []string) int { return 1 } - // Get the state + // Determine the workspace name workspace, err := c.Workspace() if err != nil { c.Ui.Error(fmt.Sprintf("Error selecting workspace: %s", err)) return 1 } + + // Check remote Terraform version is compatible + remoteVersionDiags := c.remoteBackendVersionCheck(b, workspace) + diags = diags.Append(remoteVersionDiags) + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } + + // Get the state stateMgr, err := b.StateMgr(workspace) if err != nil { c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) @@ -189,26 +192,26 @@ Usage: terraform untaint [options] name Options: - -allow-missing If specified, the command will succeed (exit code 0) - even if the resource is missing. + -allow-missing If specified, the command will succeed (exit code 0) + even if the resource is missing. - -backup=path Path to backup the existing state file before - modifying. Defaults to the "-state-out" path with - ".backup" extension. Set to "-" to disable backup. + -backup=path Path to backup the existing state file before + modifying. Defaults to the "-state-out" path with + ".backup" extension. Set to "-" to disable backup. - -lock=true Lock the state file when locking is supported. + -lock=true Lock the state file when locking is supported. - -lock-timeout=0s Duration to retry a state lock. + -lock-timeout=0s Duration to retry a state lock. - -module=path The module path where the resource lives. By - default this will be root. Child modules can be specified - by names. Ex. "consul" or "consul.vpc" (nested modules). 
+ -state=path Path to read and save state (unless state-out + is specified). Defaults to "terraform.tfstate". - -state=path Path to read and save state (unless state-out - is specified). Defaults to "terraform.tfstate". + -state-out=path Path to write updated state file. By default, the + "-state" path will be used. - -state-out=path Path to write updated state file. By default, the - "-state" path will be used. + -ignore-remote-version Continue even if remote and local Terraform versions + are incompatible. This may result in an unusable + workspace, and should be used with extreme caution. ` return strings.TrimSpace(helpText) @@ -222,7 +225,7 @@ func (c *UntaintCommand) allowMissingExit(name addrs.AbsResourceInstance) int { c.showDiagnostics(tfdiags.Sourceless( tfdiags.Warning, "No such resource instance", - "Resource instance %s was not found, but this is not an error because -allow-missing was set.", + fmt.Sprintf("Resource instance %s was not found, but this is not an error because -allow-missing was set.", name), )) return 0 } diff --git a/command/untaint_test.go b/command/untaint_test.go index 3ef0dbf75..b1b365fdb 100644 --- a/command/untaint_test.go +++ b/command/untaint_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/states" "github.com/mitchellh/cli" @@ -385,6 +386,19 @@ func TestUntaint_missingAllow(t *testing.T) { if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } + + // Check for the warning + actual := strings.TrimSpace(ui.ErrorWriter.String()) + expected := strings.TrimSpace(` +Warning: No such resource instance + +Resource instance test_instance.bar was not found, but this is not an error +because -allow-missing was set. 
+ +`) + if diff := cmp.Diff(expected, actual); diff != "" { + t.Fatalf("wrong output\n%s", diff) + } } func TestUntaint_stateOut(t *testing.T) { diff --git a/command/validate.go b/command/validate.go index 6519a5db9..687bbb8c8 100644 --- a/command/validate.go +++ b/command/validate.go @@ -81,7 +81,7 @@ func (c *ValidateCommand) Run(args []string) int { // not be valid for a stable release, so we'll warn about that in case // the user is trying to use "terraform validate" as a sort of pre-flight // check before submitting a change. - diags = diags.Append(c.providerDevOverrideWarnings()) + diags = diags.Append(c.providerDevOverrideRuntimeWarnings()) return c.showResults(diags, jsonOutput) } diff --git a/command/validate_test.go b/command/validate_test.go index cc5242f2d..65515900b 100644 --- a/command/validate_test.go +++ b/command/validate_test.go @@ -1,33 +1,40 @@ package command import ( + "encoding/json" + "io/ioutil" "os" + "path" "strings" "testing" + "github.com/google/go-cmp/cmp" "github.com/mitchellh/cli" "github.com/zclconf/go-cty/cty" "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/providers" ) func setupTest(fixturepath string, args ...string) (*cli.MockUi, int) { ui := new(cli.MockUi) p := testProvider() - p.GetSchemaReturn = &terraform.ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "ami": {Type: cty.String, Optional: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "network_interface": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "device_index": {Type: cty.String, Optional: true}, - "description": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "device_index": {Type: cty.String, Optional: true}, + "description": {Type: cty.String, Optional: true}, + "name": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -83,29 +90,25 @@ func TestValidateFailingCommand(t *testing.T) { } func TestValidateFailingCommandMissingQuote(t *testing.T) { - // FIXME: Re-enable once we've updated core for new data structures - t.Skip("test temporarily disabled until deep validate supports new config structures") - ui, code := setupTest("validate-invalid/missing_quote") if code != 1 { t.Fatalf("Should have failed: %d\n\n%s", code, ui.ErrorWriter.String()) } - if !strings.HasSuffix(strings.TrimSpace(ui.ErrorWriter.String()), "IDENT test") { - t.Fatalf("Should have failed: %d\n\n'%s'", code, ui.ErrorWriter.String()) + wantError := "Error: Invalid reference" + if !strings.Contains(ui.ErrorWriter.String(), wantError) { + t.Fatalf("Missing error string %q\n\n'%s'", wantError, ui.ErrorWriter.String()) } } func TestValidateFailingCommandMissingVariable(t *testing.T) { - // FIXME: Re-enable once we've updated core for new data structures - t.Skip("test temporarily disabled until deep validate supports new config structures") - ui, code := setupTest("validate-invalid/missing_var") if code != 1 { t.Fatalf("Should have failed: %d\n\n%s", code, ui.ErrorWriter.String()) } - if !strings.HasSuffix(strings.TrimSpace(ui.ErrorWriter.String()), "config: unknown variable referenced: 'description'; define it with a 'variable' block") { - t.Fatalf("Should have failed: %d\n\n'%s'", code, ui.ErrorWriter.String()) + wantError := "Error: Reference to undeclared input variable" + if !strings.Contains(ui.ErrorWriter.String(), wantError) { + t.Fatalf("Missing error string 
%q\n\n'%s'", wantError, ui.ErrorWriter.String()) } } @@ -197,3 +200,65 @@ func TestMissingDefinedVar(t *testing.T) { t.Fatalf("Should have passed: %d\n\n%s", code, ui.ErrorWriter.String()) } } + +func TestValidate_json(t *testing.T) { + tests := []struct { + path string + valid bool + }{ + {"validate-valid", true}, + {"validate-invalid", false}, + {"validate-invalid/missing_quote", false}, + {"validate-invalid/missing_var", false}, + {"validate-invalid/multiple_providers", false}, + {"validate-invalid/multiple_modules", false}, + {"validate-invalid/multiple_resources", false}, + {"validate-invalid/outputs", false}, + {"validate-invalid/incorrectmodulename", false}, + {"validate-invalid/interpolation", false}, + {"validate-invalid/missing_defined_var", true}, + } + + for _, tc := range tests { + t.Run(tc.path, func(t *testing.T) { + var want, got map[string]interface{} + + wantFile, err := os.Open(path.Join(testFixturePath(tc.path), "output.json")) + if err != nil { + t.Fatalf("failed to open output file: %s", err) + } + defer wantFile.Close() + wantBytes, err := ioutil.ReadAll(wantFile) + if err != nil { + t.Fatalf("failed to read output file: %s", err) + } + err = json.Unmarshal([]byte(wantBytes), &want) + if err != nil { + t.Fatalf("failed to unmarshal expected JSON: %s", err) + } + + ui, code := setupTest(tc.path, "-json") + + gotString := ui.OutputWriter.String() + err = json.Unmarshal([]byte(gotString), &got) + if err != nil { + t.Fatalf("failed to unmarshal actual JSON: %s", err) + } + + if !cmp.Equal(got, want) { + t.Errorf("wrong output:\n %v\n", cmp.Diff(got, want)) + t.Errorf("raw output:\n%s\n", gotString) + } + + if tc.valid && code != 0 { + t.Errorf("wrong exit code: want 0, got %d", code) + } else if !tc.valid && code != 1 { + t.Errorf("wrong exit code: want 1, got %d", code) + } + + if errorOutput := ui.ErrorWriter.String(); errorOutput != "" { + t.Errorf("unexpected error output:\n%s", errorOutput) + } + }) + } +} diff --git a/command/version.go 
b/command/version.go index cbece20ac..27dbbff43 100644 --- a/command/version.go +++ b/command/version.go @@ -9,21 +9,22 @@ import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/internal/depsfile" + "github.com/hashicorp/terraform/internal/getproviders" ) // VersionCommand is a Command implementation prints the version. type VersionCommand struct { Meta - Revision string Version string VersionPrerelease string CheckFunc VersionCheckFunc + Platform getproviders.Platform } type VersionOutput struct { Version string `json:"terraform_version"` - Revision string `json:"terraform_revision"` + Platform string `json:"platform"` ProviderSelections map[string]string `json:"provider_selections"` Outdated bool `json:"terraform_outdated"` } @@ -77,10 +78,6 @@ func (c *VersionCommand) Run(args []string) int { fmt.Fprintf(&versionString, "Terraform v%s", c.Version) if c.VersionPrerelease != "" { fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease) - - if c.Revision != "" { - fmt.Fprintf(&versionString, " (%s)", c.Revision) - } } // We'll also attempt to print out the selected plugin versions. 
We do @@ -136,7 +133,7 @@ func (c *VersionCommand) Run(args []string) int { output := VersionOutput{ Version: versionOutput, - Revision: c.Revision, + Platform: c.Platform.String(), ProviderSelections: selectionsOutput, Outdated: outdated, } @@ -150,6 +147,8 @@ func (c *VersionCommand) Run(args []string) int { return 0 } else { c.Ui.Output(versionString.String()) + c.Ui.Output(fmt.Sprintf("on %s", c.Platform)) + if len(providerVersions) != 0 { sort.Strings(providerVersions) for _, str := range providerVersions { diff --git a/command/version_test.go b/command/version_test.go index 3844dbd12..10f384c45 100644 --- a/command/version_test.go +++ b/command/version_test.go @@ -49,6 +49,7 @@ func TestVersion(t *testing.T) { }, Version: "4.5.6", VersionPrerelease: "foo", + Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, } if err := c.replaceLockedDependencies(locks); err != nil { t.Fatal(err) @@ -58,7 +59,7 @@ func TestVersion(t *testing.T) { } actual := strings.TrimSpace(ui.OutputWriter.String()) - expected := "Terraform v4.5.6-foo\n+ provider registry.terraform.io/hashicorp/test1 v7.8.9-beta.2\n+ provider registry.terraform.io/hashicorp/test2 v1.2.3" + expected := "Terraform v4.5.6-foo\non aros_riscv64\n+ provider registry.terraform.io/hashicorp/test1 v7.8.9-beta.2\n+ provider registry.terraform.io/hashicorp/test2 v1.2.3" if actual != expected { t.Fatalf("wrong output\ngot:\n%s\nwant:\n%s", actual, expected) } @@ -76,6 +77,7 @@ func TestVersion_flags(t *testing.T) { Meta: m, Version: "4.5.6", VersionPrerelease: "foo", + Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, } if code := c.Run([]string{"-v", "-version"}); code != 0 { @@ -83,7 +85,7 @@ func TestVersion_flags(t *testing.T) { } actual := strings.TrimSpace(ui.OutputWriter.String()) - expected := "Terraform v4.5.6-foo" + expected := "Terraform v4.5.6-foo\non aros_riscv64" if actual != expected { t.Fatalf("wrong output\ngot: %#v\nwant: %#v", actual, expected) } @@ -99,6 +101,7 @@ func 
TestVersion_outdated(t *testing.T) { Meta: m, Version: "4.5.6", CheckFunc: mockVersionCheckFunc(true, "4.5.7"), + Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, } if code := c.Run([]string{}); code != 0 { @@ -106,7 +109,7 @@ func TestVersion_outdated(t *testing.T) { } actual := strings.TrimSpace(ui.OutputWriter.String()) - expected := "Terraform v4.5.6\n\nYour version of Terraform is out of date! The latest version\nis 4.5.7. You can update by downloading from https://www.terraform.io/downloads.html" + expected := "Terraform v4.5.6\non aros_riscv64\n\nYour version of Terraform is out of date! The latest version\nis 4.5.7. You can update by downloading from https://www.terraform.io/downloads.html" if actual != expected { t.Fatalf("wrong output\ngot: %#v\nwant: %#v", actual, expected) } @@ -127,8 +130,9 @@ func TestVersion_json(t *testing.T) { // `terraform version -json` without prerelease c := &VersionCommand{ - Meta: meta, - Version: "4.5.6", + Meta: meta, + Version: "4.5.6", + Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, } if code := c.Run([]string{"-json"}); code != 0 { t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) @@ -138,7 +142,7 @@ func TestVersion_json(t *testing.T) { expected := strings.TrimSpace(` { "terraform_version": "4.5.6", - "terraform_revision": "", + "platform": "aros_riscv64", "provider_selections": {}, "terraform_outdated": false } @@ -172,6 +176,7 @@ func TestVersion_json(t *testing.T) { Meta: meta, Version: "4.5.6", VersionPrerelease: "foo", + Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, } if err := c.replaceLockedDependencies(locks); err != nil { t.Fatal(err) @@ -184,7 +189,7 @@ func TestVersion_json(t *testing.T) { expected = strings.TrimSpace(` { "terraform_version": "4.5.6-foo", - "terraform_revision": "", + "platform": "aros_riscv64", "provider_selections": { "registry.terraform.io/hashicorp/test1": "7.8.9-beta.2", "registry.terraform.io/hashicorp/test2": "1.2.3" @@ -208,6 +213,7 @@ func 
TestVersion_jsonoutdated(t *testing.T) { Meta: m, Version: "4.5.6", CheckFunc: mockVersionCheckFunc(true, "4.5.7"), + Platform: getproviders.Platform{OS: "aros", Arch: "riscv64"}, } if code := c.Run([]string{"-json"}); code != 0 { @@ -215,7 +221,7 @@ func TestVersion_jsonoutdated(t *testing.T) { } actual := strings.TrimSpace(ui.OutputWriter.String()) - expected := "{\n \"terraform_version\": \"4.5.6\",\n \"terraform_revision\": \"\",\n \"provider_selections\": {},\n \"terraform_outdated\": true\n}" + expected := "{\n \"terraform_version\": \"4.5.6\",\n \"platform\": \"aros_riscv64\",\n \"provider_selections\": {},\n \"terraform_outdated\": true\n}" if actual != expected { t.Fatalf("wrong output\ngot: %#v\nwant: %#v", actual, expected) } diff --git a/command/views/output.go b/command/views/output.go new file mode 100644 index 000000000..e3c998ef6 --- /dev/null +++ b/command/views/output.go @@ -0,0 +1,278 @@ +package views + +import ( + "bytes" + "encoding/json" + "fmt" + "sort" + "strings" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/command/arguments" + "github.com/hashicorp/terraform/repl" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +// The Output view renders either one or all outputs, depending on whether or +// not the name argument is empty. +type Output interface { + Output(name string, outputs map[string]*states.OutputValue) tfdiags.Diagnostics + Diagnostics(diags tfdiags.Diagnostics) +} + +// NewOutput returns an initialized Output implementation for the given ViewType. 
+func NewOutput(vt arguments.ViewType, view *View) Output { + switch vt { + case arguments.ViewJSON: + return &OutputJSON{View: *view} + case arguments.ViewRaw: + return &OutputRaw{View: *view} + case arguments.ViewHuman: + return &OutputHuman{View: *view} + default: + panic(fmt.Sprintf("unknown view type %v", vt)) + } +} + +// The OutputHuman implementation renders outputs in a format equivalent to HCL +// source. This uses the same formatting logic as in the console REPL. +type OutputHuman struct { + View +} + +var _ Output = (*OutputHuman)(nil) + +func (v *OutputHuman) Output(name string, outputs map[string]*states.OutputValue) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if len(outputs) == 0 { + diags = diags.Append(noOutputsWarning()) + return diags + } + + if name != "" { + output, ok := outputs[name] + if !ok { + diags = diags.Append(missingOutputError(name)) + return diags + } + result := repl.FormatValue(output.Value, 0) + v.streams.Println(result) + return nil + } + + outputBuf := new(bytes.Buffer) + if len(outputs) > 0 { + // Output the outputs in alphabetical order + keyLen := 0 + ks := make([]string, 0, len(outputs)) + for key := range outputs { + ks = append(ks, key) + if len(key) > keyLen { + keyLen = len(key) + } + } + sort.Strings(ks) + + for _, k := range ks { + v := outputs[k] + if v.Sensitive { + outputBuf.WriteString(fmt.Sprintf("%s = \n", k)) + continue + } + + result := repl.FormatValue(v.Value, 0) + outputBuf.WriteString(fmt.Sprintf("%s = %s\n", k, result)) + } + } + + v.streams.Println(strings.TrimSpace(outputBuf.String())) + + return nil +} + +// The OutputRaw implementation renders single string, number, or boolean +// output values directly and without quotes or other formatting. This is +// intended for use in shell scripting or other environments where the exact +// type of an output value is not important. 
+type OutputRaw struct { + View + + // Unit tests may set rawPrint to capture the output from the Output + // method, which would normally go to stdout directly. + rawPrint func(string) +} + +var _ Output = (*OutputRaw)(nil) + +func (v *OutputRaw) Output(name string, outputs map[string]*states.OutputValue) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if len(outputs) == 0 { + diags = diags.Append(noOutputsWarning()) + return diags + } + + if name == "" { + diags = diags.Append(fmt.Errorf("Raw output format is only supported for single outputs")) + return diags + } + + output, ok := outputs[name] + if !ok { + diags = diags.Append(missingOutputError(name)) + return diags + } + + strV, err := convert.Convert(output.Value, cty.String) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported value for raw output", + fmt.Sprintf( + "The -raw option only supports strings, numbers, and boolean values, but output value %q is %s.\n\nUse the -json option for machine-readable representations of output values that have complex types.", + name, output.Value.Type().FriendlyName(), + ), + )) + return diags + } + if strV.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported value for raw output", + fmt.Sprintf( + "The value for output value %q is null, so -raw mode cannot print it.", + name, + ), + )) + return diags + } + if !strV.IsKnown() { + // Since we're working with values from the state it would be very + // odd to end up in here, but we'll handle it anyway to avoid a + // panic in case our rules somehow change in future. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported value for raw output", + fmt.Sprintf( + "The value for output value %q won't be known until after a successful terraform apply, so -raw mode cannot print it.", + name, + ), + )) + return diags + } + // If we get out here then we should have a valid string to print. 
+ // We're writing it directly to the output here so that a shell caller + // will get exactly the value and no extra whitespace. + str := strV.AsString() + fmt.Fprint(v.streams.Stdout.File, str) + return nil +} + +// The OutputJSON implementation renders outputs as JSON values. When rendering +// a single output, only the value is displayed. When rendering all outputs, +// the result is a JSON object with keys matching the output names and object +// values including type and sensitivity metadata. +type OutputJSON struct { + View +} + +var _ Output = (*OutputJSON)(nil) + +func (v *OutputJSON) Output(name string, outputs map[string]*states.OutputValue) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if name != "" { + output, ok := outputs[name] + if !ok { + diags = diags.Append(missingOutputError(name)) + return diags + } + value := output.Value + + jsonOutput, err := ctyjson.Marshal(value, value.Type()) + if err != nil { + diags = diags.Append(err) + return diags + } + + v.streams.Println(string(jsonOutput)) + + return nil + } + + // Due to a historical accident, the switch from state version 2 to + // 3 caused our JSON output here to be the full metadata about the + // outputs rather than just the output values themselves as we'd + // show in the single value case. We must now maintain that behavior + // for compatibility, so this is an emulation of the JSON + // serialization of outputs used in state format version 3. 
+ type OutputMeta struct { + Sensitive bool `json:"sensitive"` + Type json.RawMessage `json:"type"` + Value json.RawMessage `json:"value"` + } + outputMetas := map[string]OutputMeta{} + + for n, os := range outputs { + jsonVal, err := ctyjson.Marshal(os.Value, os.Value.Type()) + if err != nil { + diags = diags.Append(err) + return diags + } + jsonType, err := ctyjson.MarshalType(os.Value.Type()) + if err != nil { + diags = diags.Append(err) + return diags + } + outputMetas[n] = OutputMeta{ + Sensitive: os.Sensitive, + Type: json.RawMessage(jsonType), + Value: json.RawMessage(jsonVal), + } + } + + jsonOutputs, err := json.MarshalIndent(outputMetas, "", " ") + if err != nil { + diags = diags.Append(err) + return diags + } + + v.streams.Println(string(jsonOutputs)) + + return nil +} + +// For text and raw output modes, an empty map of outputs is considered a +// separate and higher priority failure mode than an output not being present +// in a non-empty map. This warning diagnostic explains how this might have +// happened. +func noOutputsWarning() tfdiags.Diagnostic { + return tfdiags.Sourceless( + tfdiags.Warning, + "No outputs found", + "The state file either has no outputs defined, or all the defined "+ + "outputs are empty. Please define an output in your configuration "+ + "with the `output` keyword and run `terraform refresh` for it to "+ + "become available. If you are using interpolation, please verify "+ + "the interpolated value is not empty. You can use the "+ + "`terraform console` command to assist.", + ) +} + +// Attempting to display a missing output results in this failure, which +// includes suggestions on how to rectify the problem. +func missingOutputError(name string) tfdiags.Diagnostic { + return tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Output %q not found", name), + "The output variable requested could not be found in the state "+ + "file. 
If you recently added this to your configuration, be "+ + "sure to run `terraform apply`, since the state won't be updated "+ + "with new output variables until that command is run.", + ) +} diff --git a/command/views/output_test.go b/command/views/output_test.go new file mode 100644 index 000000000..2cfa95a33 --- /dev/null +++ b/command/views/output_test.go @@ -0,0 +1,60 @@ +package views + +import ( + "testing" + + "github.com/hashicorp/terraform/internal/terminal" + "github.com/hashicorp/terraform/states" + "github.com/zclconf/go-cty/cty" +) + +func TestOutputRaw(t *testing.T) { + values := map[string]cty.Value{ + "str": cty.StringVal("bar"), + "multistr": cty.StringVal("bar\nbaz"), + "num": cty.NumberIntVal(2), + "bool": cty.True, + "obj": cty.EmptyObjectVal, + "null": cty.NullVal(cty.String), + } + + tests := map[string]struct { + WantOutput string + WantErr bool + }{ + "str": {WantOutput: "bar"}, + "multistr": {WantOutput: "bar\nbaz"}, + "num": {WantOutput: "2"}, + "bool": {WantOutput: "true"}, + "obj": {WantErr: true}, + "null": {WantErr: true}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + streams, done := terminal.StreamsForTesting(t) + view := NewView(streams) + v := &OutputRaw{ + View: *view, + } + + value := values[name] + outputs := map[string]*states.OutputValue{ + name: {Value: value}, + } + diags := v.Output(name, outputs) + + if diags.HasErrors() { + if !test.WantErr { + t.Fatalf("unexpected diagnostics: %s", diags) + } + } else if test.WantErr { + t.Fatalf("succeeded, but want error") + } + + if got, want := done(t).Stdout(), test.WantOutput; got != want { + t.Errorf("wrong result\ngot: %q\nwant: %q", got, want) + } + }) + } +} diff --git a/command/views/view.go b/command/views/view.go new file mode 100644 index 000000000..57977a243 --- /dev/null +++ b/command/views/view.go @@ -0,0 +1,114 @@ +package views + +import ( + "github.com/hashicorp/terraform/command/arguments" + 
"github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/internal/terminal" + "github.com/hashicorp/terraform/tfdiags" + "github.com/mitchellh/colorstring" +) + +// View is the base layer for command views, encapsulating a set of I/O +// streams, a colorize implementation, and implementing a human friendly view +// for diagnostics. +type View struct { + streams *terminal.Streams + colorize *colorstring.Colorize + + // NOTE: compactWarnings is currently always false. When implementing + // views for commands which support this flag, we will need to address this. + compactWarnings bool + + // This unfortunate wart is required to enable rendering of diagnostics which + // have associated source code in the configuration. This function pointer + // will be dereferenced as late as possible when rendering diagnostics in + // order to access the config loader cache. + configSources func() map[string][]byte +} + +// Initialize a View with the given streams, a disabled colorize object, and a +// no-op configSources callback. +func NewView(streams *terminal.Streams) *View { + return &View{ + streams: streams, + colorize: &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + Reset: true, + }, + configSources: func() map[string][]byte { return nil }, + } +} + +// Configure applies the global view configuration flags. +func (v *View) Configure(view *arguments.View) { + v.colorize.Disable = view.NoColor + v.compactWarnings = view.CompactWarnings +} + +// SetConfigSources overrides the default no-op callback with a new function +// pointer, and should be called when the config loader is initialized. +func (v *View) SetConfigSources(cb func() map[string][]byte) { + v.configSources = cb +} + +// Diagnostics renders a set of warnings and errors in human-readable form. +// Warnings are printed to stdout, and errors to stderr. 
+func (v *View) Diagnostics(diags tfdiags.Diagnostics) { + diags.Sort() + + if len(diags) == 0 { + return + } + + diags = diags.ConsolidateWarnings(1) + + // Since warning messages are generally competing + if v.compactWarnings { + // If the user selected compact warnings and all of the diagnostics are + // warnings then we'll use a more compact representation of the warnings + // that only includes their summaries. + // We show full warnings if there are also errors, because a warning + // can sometimes serve as good context for a subsequent error. + useCompact := true + for _, diag := range diags { + if diag.Severity() != tfdiags.Warning { + useCompact = false + break + } + } + if useCompact { + msg := format.DiagnosticWarningsCompact(diags, v.colorize) + msg = "\n" + msg + "\nTo see the full warning notes, run Terraform without -compact-warnings.\n" + v.streams.Print(msg) + return + } + } + + for _, diag := range diags { + var msg string + if v.colorize.Disable { + msg = format.DiagnosticPlain(diag, v.configSources(), v.streams.Stderr.Columns()) + } else { + msg = format.Diagnostic(diag, v.configSources(), v.colorize, v.streams.Stderr.Columns()) + } + + if diag.Severity() == tfdiags.Error { + v.streams.Eprint(msg) + } else { + v.streams.Print(msg) + } + } +} + +// HelpPrompt is intended to be called from commands which fail to parse all +// of their CLI arguments successfully. It refers users to the full help output +// rather than rendering it directly, which can be overwhelming and confusing. 
+func (v *View) HelpPrompt(command string) { + v.streams.Eprintf(helpPrompt, command) +} + +const helpPrompt = ` +For more help on using this command, run: + terraform %s -help +` diff --git a/command/webbrowser/native.go b/command/webbrowser/native.go index 77d503a2c..4e8281ce1 100644 --- a/command/webbrowser/native.go +++ b/command/webbrowser/native.go @@ -2,8 +2,6 @@ package webbrowser import ( "github.com/pkg/browser" - "os/exec" - "strings" ) // NewNativeLauncher creates and returns a Launcher that will attempt to interact @@ -15,18 +13,6 @@ func NewNativeLauncher() Launcher { type nativeLauncher struct{} -func hasProgram(name string) bool { - _, err := exec.LookPath(name) - return err == nil -} - func (l nativeLauncher) OpenURL(url string) error { - // Windows Subsystem for Linux (bash for Windows) doesn't have xdg-open available - // but you can execute cmd.exe from there; try to identify it - if !hasProgram("xdg-open") && hasProgram("cmd.exe") { - r := strings.NewReplacer("&", "^&") - exec.Command("cmd.exe", "/c", "start", r.Replace(url)).Run() - } - return browser.OpenURL(url) } diff --git a/command/workspace_command.go b/command/workspace_command.go index 3de83dbcc..443884e6d 100644 --- a/command/workspace_command.go +++ b/command/workspace_command.go @@ -15,7 +15,7 @@ type WorkspaceCommand struct { } func (c *WorkspaceCommand) Run(args []string) int { - args = c.Meta.process(args) + c.Meta.process(args) envCommandShowWarning(c.Ui, c.LegacyName) cmdFlags := c.Meta.extendedFlagSet("workspace") diff --git a/command/workspace_command_test.go b/command/workspace_command_test.go index df4076b4c..0ffc7b8fe 100644 --- a/command/workspace_command_test.go +++ b/command/workspace_command_test.go @@ -13,8 +13,9 @@ import ( "github.com/hashicorp/terraform/backend/remote-state/inmem" "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/states/statemgr" - "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/cli" + + legacy 
"github.com/hashicorp/terraform/internal/legacy/terraform" ) func TestWorkspace_createAndChange(t *testing.T) { @@ -379,14 +380,14 @@ func TestWorkspace_deleteWithState(t *testing.T) { } // create a non-empty state - originalState := &terraform.State{ - Modules: []*terraform.ModuleState{ - &terraform.ModuleState{ + originalState := &legacy.State{ + Modules: []*legacy.ModuleState{ + &legacy.ModuleState{ Path: []string{"root"}, - Resources: map[string]*terraform.ResourceState{ - "test_instance.foo": &terraform.ResourceState{ + Resources: map[string]*legacy.ResourceState{ + "test_instance.foo": &legacy.ResourceState{ Type: "test_instance", - Primary: &terraform.InstanceState{ + Primary: &legacy.InstanceState{ ID: "bar", }, }, @@ -400,7 +401,7 @@ func TestWorkspace_deleteWithState(t *testing.T) { t.Fatal(err) } defer f.Close() - if err := terraform.WriteState(originalState, f); err != nil { + if err := legacy.WriteState(originalState, f); err != nil { t.Fatal(err) } diff --git a/command/workspace_delete.go b/command/workspace_delete.go index ebb7c5eed..578ac77a8 100644 --- a/command/workspace_delete.go +++ b/command/workspace_delete.go @@ -35,8 +35,8 @@ func (c *WorkspaceDeleteCommand) Run(args []string) int { } args = cmdFlags.Args() - if len(args) == 0 { - c.Ui.Error("expected NAME.\n") + if len(args) != 1 { + c.Ui.Error("Expected a single argument: NAME.\n") return cli.RunResultHelp } @@ -65,6 +65,9 @@ func (c *WorkspaceDeleteCommand) Run(args []string) int { return 1 } + // This command will not write state + c.ignoreRemoteBackendVersionConflict(b) + workspaces, err := b.Workspaces() if err != nil { c.Ui.Error(err.Error()) @@ -179,7 +182,7 @@ func (c *WorkspaceDeleteCommand) AutocompleteFlags() complete.Flags { func (c *WorkspaceDeleteCommand) Help() string { helpText := ` -Usage: terraform workspace delete [OPTIONS] NAME [DIR] +Usage: terraform workspace delete [OPTIONS] NAME Delete a Terraform workspace diff --git a/command/workspace_list.go 
b/command/workspace_list.go index 03fdf4a50..51c345ed6 100644 --- a/command/workspace_list.go +++ b/command/workspace_list.go @@ -51,6 +51,9 @@ func (c *WorkspaceListCommand) Run(args []string) int { return 1 } + // This command will not write state + c.ignoreRemoteBackendVersionConflict(b) + states, err := b.Workspaces() if err != nil { c.Ui.Error(err.Error()) @@ -88,7 +91,7 @@ func (c *WorkspaceListCommand) AutocompleteFlags() complete.Flags { func (c *WorkspaceListCommand) Help() string { helpText := ` -Usage: terraform workspace list [DIR] +Usage: terraform workspace list List Terraform workspaces. diff --git a/command/workspace_new.go b/command/workspace_new.go index 549beebae..def5e50a8 100644 --- a/command/workspace_new.go +++ b/command/workspace_new.go @@ -37,7 +37,7 @@ func (c *WorkspaceNewCommand) Run(args []string) int { } args = cmdFlags.Args() - if len(args) == 0 { + if len(args) != 1 { c.Ui.Error("Expected a single argument: NAME.\n") return cli.RunResultHelp } @@ -81,6 +81,9 @@ func (c *WorkspaceNewCommand) Run(args []string) int { return 1 } + // This command will not write state + c.ignoreRemoteBackendVersionConflict(b) + workspaces, err := b.Workspaces() if err != nil { c.Ui.Error(fmt.Sprintf("Failed to get configured named states: %s", err)) @@ -173,7 +176,7 @@ func (c *WorkspaceNewCommand) AutocompleteFlags() complete.Flags { func (c *WorkspaceNewCommand) Help() string { helpText := ` -Usage: terraform workspace new [OPTIONS] NAME [DIR] +Usage: terraform workspace new [OPTIONS] NAME Create a new Terraform workspace. 
diff --git a/command/workspace_select.go b/command/workspace_select.go index 9667ff9dc..801853bdb 100644 --- a/command/workspace_select.go +++ b/command/workspace_select.go @@ -26,7 +26,7 @@ func (c *WorkspaceSelectCommand) Run(args []string) int { } args = cmdFlags.Args() - if len(args) == 0 { + if len(args) != 1 { c.Ui.Error("Expected a single argument: NAME.\n") return cli.RunResultHelp } @@ -67,6 +67,9 @@ func (c *WorkspaceSelectCommand) Run(args []string) int { return 1 } + // This command will not write state + c.ignoreRemoteBackendVersionConflict(b) + name := args[0] if !validWorkspaceName(name) { c.Ui.Error(fmt.Sprintf(envInvalidName, name)) @@ -126,7 +129,7 @@ func (c *WorkspaceSelectCommand) AutocompleteFlags() complete.Flags { func (c *WorkspaceSelectCommand) Help() string { helpText := ` -Usage: terraform workspace select NAME [DIR] +Usage: terraform workspace select NAME Select a different Terraform workspace. diff --git a/commands.go b/commands.go index dd6010ce1..b694c6acf 100644 --- a/commands.go +++ b/commands.go @@ -13,8 +13,10 @@ import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/command" "github.com/hashicorp/terraform/command/cliconfig" + "github.com/hashicorp/terraform/command/views" "github.com/hashicorp/terraform/command/webbrowser" "github.com/hashicorp/terraform/internal/getproviders" + "github.com/hashicorp/terraform/internal/terminal" pluginDiscovery "github.com/hashicorp/terraform/plugin/discovery" ) @@ -46,13 +48,9 @@ var HiddenCommands map[string]struct{} // Ui is the cli.Ui used for communicating to the outside world. 
var Ui cli.Ui -const ( - ErrorPrefix = "e:" - OutputPrefix = "o:" -) - func initCommands( originalWorkingDir string, + streams *terminal.Streams, config *cliconfig.Config, services *disco.Disco, providerSrc getproviders.Source, @@ -83,6 +81,8 @@ func initCommands( meta := command.Meta{ OriginalWorkingDir: originalWorkingDir, + Streams: streams, + View: views.NewView(streams), Color: true, GlobalPluginDirs: globalPluginDirs(), @@ -105,7 +105,7 @@ func initCommands( // The command list is included in the terraform -help // output, which is in turn included in the docs at - // website/docs/commands/index.html.markdown; if you + // website/docs/cli/commands/index.html.markdown; if you // add, remove or reclassify commands then consider updating // that to match. @@ -194,12 +194,6 @@ func initCommands( }, nil }, - "internal-plugin": func() (cli.Command, error) { - return &command.InternalPluginCommand{ - Meta: meta, - }, nil - }, - "login": func() (cli.Command, error) { return &command.LoginCommand{ Meta: meta, @@ -281,9 +275,9 @@ func initCommands( "version": func() (cli.Command, error) { return &command.VersionCommand{ Meta: meta, - Revision: GitCommit, Version: Version, VersionPrerelease: VersionPrerelease, + Platform: getproviders.CurrentPlatform, CheckFunc: commandVersionCheck, }, nil }, diff --git a/communicator/communicator.go b/communicator/communicator.go index 12b725b32..27261421e 100644 --- a/communicator/communicator.go +++ b/communicator/communicator.go @@ -9,16 +9,18 @@ import ( "time" "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/communicator/shared" "github.com/hashicorp/terraform/communicator/ssh" "github.com/hashicorp/terraform/communicator/winrm" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/provisioners" + "github.com/zclconf/go-cty/cty" ) // Communicator is an interface that must be implemented by all communicators // used for any of the provisioners type Communicator 
interface { - // Connect is used to setup the connection - Connect(terraform.UIOutput) error + // Connect is used to set up the connection + Connect(provisioners.UIOutput) error // Disconnect is used to terminate the connection Disconnect() error @@ -43,13 +45,23 @@ type Communicator interface { } // New returns a configured Communicator or an error if the connection type is not supported -func New(s *terraform.InstanceState) (Communicator, error) { - connType := s.Ephemeral.ConnInfo["type"] +func New(v cty.Value) (Communicator, error) { + v, err := shared.ConnectionBlockSupersetSchema.CoerceValue(v) + if err != nil { + return nil, err + } + + typeVal := v.GetAttr("type") + connType := "" + if !typeVal.IsNull() { + connType = typeVal.AsString() + } + switch connType { case "ssh", "": // The default connection type is ssh, so if connType is empty use ssh - return ssh.New(s) + return ssh.New(v) case "winrm": - return winrm.New(s) + return winrm.New(v) default: return nil, fmt.Errorf("connection type '%s' not supported", connType) } diff --git a/communicator/communicator_mock.go b/communicator/communicator_mock.go index 49304a070..b619560c0 100644 --- a/communicator/communicator_mock.go +++ b/communicator/communicator_mock.go @@ -8,7 +8,7 @@ import ( "time" "github.com/hashicorp/terraform/communicator/remote" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/provisioners" ) // MockCommunicator is an implementation of Communicator that can be used for tests. 
@@ -24,7 +24,7 @@ type MockCommunicator struct { } // Connect implementation of communicator.Communicator interface -func (c *MockCommunicator) Connect(o terraform.UIOutput) error { +func (c *MockCommunicator) Connect(o provisioners.UIOutput) error { return nil } diff --git a/communicator/communicator_test.go b/communicator/communicator_test.go index e20d0368b..20cdd8ff3 100644 --- a/communicator/communicator_test.go +++ b/communicator/communicator_test.go @@ -8,29 +8,26 @@ import ( "testing" "time" - "github.com/hashicorp/terraform/terraform" + "github.com/zclconf/go-cty/cty" ) func TestCommunicator_new(t *testing.T) { - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "telnet", - "host": "127.0.0.1", - }, - }, + cfg := map[string]cty.Value{ + "type": cty.StringVal("telnet"), + "host": cty.StringVal("127.0.0.1"), } - if _, err := New(r); err == nil { + + if _, err := New(cty.ObjectVal(cfg)); err == nil { t.Fatalf("expected error with telnet") } - r.Ephemeral.ConnInfo["type"] = "ssh" - if _, err := New(r); err != nil { + cfg["type"] = cty.StringVal("ssh") + if _, err := New(cty.ObjectVal(cfg)); err != nil { t.Fatalf("err: %v", err) } - r.Ephemeral.ConnInfo["type"] = "winrm" - if _, err := New(r); err != nil { + cfg["type"] = cty.StringVal("winrm") + if _, err := New(cty.ObjectVal(cfg)); err != nil { t.Fatalf("err: %v", err) } } diff --git a/communicator/shared/shared.go b/communicator/shared/shared.go index 39cb16961..509aadd28 100644 --- a/communicator/shared/shared.go +++ b/communicator/shared/shared.go @@ -3,8 +3,124 @@ package shared import ( "fmt" "net" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" ) +// ConnectionBlockSupersetSchema is a schema representing the superset of all +// possible arguments for "connection" blocks across all supported connection +// types. 
+// +// This currently lives here because we've not yet updated our communicator +// subsystem to be aware of schema itself. Once that is done, we can remove +// this and use a type-specific schema from the communicator to validate +// exactly what is expected for a given connection type. +var ConnectionBlockSupersetSchema = &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + // Common attributes for both connection types + "host": { + Type: cty.String, + Required: true, + }, + "type": { + Type: cty.String, + Optional: true, + }, + "user": { + Type: cty.String, + Optional: true, + }, + "password": { + Type: cty.String, + Optional: true, + }, + "port": { + Type: cty.String, + Optional: true, + }, + "timeout": { + Type: cty.String, + Optional: true, + }, + "script_path": { + Type: cty.String, + Optional: true, + }, + // For type=ssh only (enforced in ssh communicator) + "target_platform": { + Type: cty.String, + Optional: true, + }, + "private_key": { + Type: cty.String, + Optional: true, + }, + "certificate": { + Type: cty.String, + Optional: true, + }, + "host_key": { + Type: cty.String, + Optional: true, + }, + "agent": { + Type: cty.Bool, + Optional: true, + }, + "agent_identity": { + Type: cty.String, + Optional: true, + }, + "bastion_host": { + Type: cty.String, + Optional: true, + }, + "bastion_host_key": { + Type: cty.String, + Optional: true, + }, + "bastion_port": { + Type: cty.Number, + Optional: true, + }, + "bastion_user": { + Type: cty.String, + Optional: true, + }, + "bastion_password": { + Type: cty.String, + Optional: true, + }, + "bastion_private_key": { + Type: cty.String, + Optional: true, + }, + "bastion_certificate": { + Type: cty.String, + Optional: true, + }, + + // For type=winrm only (enforced in winrm communicator) + "https": { + Type: cty.Bool, + Optional: true, + }, + "insecure": { + Type: cty.Bool, + Optional: true, + }, + "cacert": { + Type: cty.String, + Optional: true, + }, + "use_ntlm": { + Type: cty.Bool, + 
Optional: true, + }, + }, +} + // IpFormat formats the IP correctly, so we don't provide IPv6 address in an IPv4 format during node communication. We return the ip parameter as is if it's an IPv4 address or a hostname. func IpFormat(ip string) string { ipObj := net.ParseIP(ip) diff --git a/communicator/ssh/communicator.go b/communicator/ssh/communicator.go index f39d70898..afbe54945 100644 --- a/communicator/ssh/communicator.go +++ b/communicator/ssh/communicator.go @@ -20,9 +20,12 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform/communicator/remote" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/provisioners" + "github.com/zclconf/go-cty/cty" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" + + _ "github.com/hashicorp/terraform/internal/logging" ) const ( @@ -52,7 +55,6 @@ type Communicator struct { client *ssh.Client config *sshConfig conn net.Conn - address string cancelKeepAlive context.CancelFunc lock sync.Mutex @@ -84,8 +86,8 @@ func (e fatalError) FatalError() error { } // New creates a new communicator implementation over SSH. -func New(s *terraform.InstanceState) (*Communicator, error) { - connInfo, err := parseConnectionInfo(s) +func New(v cty.Value) (*Communicator, error) { + connInfo, err := parseConnectionInfo(v) if err != nil { return nil, err } @@ -95,7 +97,7 @@ func New(s *terraform.InstanceState) (*Communicator, error) { return nil, err } - // Setup the random number generator once. The seed value is the + // Set up the random number generator once. The seed value is the // time multiplied by the PID. This can overflow the int64 but that // is okay. We multiply by the PID in case we have multiple processes // grabbing this at the same time. 
This is possible with Terraform and @@ -117,7 +119,7 @@ func New(s *terraform.InstanceState) (*Communicator, error) { } // Connect implementation of communicator.Communicator interface -func (c *Communicator) Connect(o terraform.UIOutput) (err error) { +func (c *Communicator) Connect(o provisioners.UIOutput) (err error) { // Grab a lock so we can modify our internal attributes c.lock.Lock() defer c.lock.Unlock() @@ -139,13 +141,15 @@ func (c *Communicator) Connect(o terraform.UIOutput) (err error) { " Private key: %t\n"+ " Certificate: %t\n"+ " SSH Agent: %t\n"+ - " Checking Host Key: %t", + " Checking Host Key: %t\n"+ + " Target Platform: %s\n", c.connInfo.Host, c.connInfo.User, c.connInfo.Password != "", c.connInfo.PrivateKey != "", c.connInfo.Certificate != "", c.connInfo.Agent, c.connInfo.HostKey != "", + c.connInfo.TargetPlatform, )) if c.connInfo.BastionHost != "" { @@ -338,12 +342,12 @@ func (c *Communicator) Start(cmd *remote.Cmd) error { return err } - // Setup our session + // Set up our session session.Stdin = cmd.Stdin session.Stdout = cmd.Stdout session.Stderr = cmd.Stderr - if !c.config.noPty { + if !c.config.noPty && c.connInfo.TargetPlatform != TargetPlatformWindows { // Request a PTY termModes := ssh.TerminalModes{ ssh.ECHO: 0, // do not echo @@ -425,35 +429,35 @@ func (c *Communicator) UploadScript(path string, input io.Reader) error { if err != nil { return fmt.Errorf("Error reading script: %s", err) } - var script bytes.Buffer - if string(prefix) != "#!" { + + if string(prefix) != "#!" 
&& c.connInfo.TargetPlatform != TargetPlatformWindows { script.WriteString(DefaultShebang) } - script.ReadFrom(reader) + if err := c.Upload(path, &script); err != nil { return err } + if c.connInfo.TargetPlatform != TargetPlatformWindows { + var stdout, stderr bytes.Buffer + cmd := &remote.Cmd{ + Command: fmt.Sprintf("chmod 0777 %s", path), + Stdout: &stdout, + Stderr: &stderr, + } + if err := c.Start(cmd); err != nil { + return fmt.Errorf( + "Error chmodding script file to 0777 in remote "+ + "machine: %s", err) + } - var stdout, stderr bytes.Buffer - cmd := &remote.Cmd{ - Command: fmt.Sprintf("chmod 0777 %s", path), - Stdout: &stdout, - Stderr: &stderr, + if err := cmd.Wait(); err != nil { + return fmt.Errorf( + "Error chmodding script file to 0777 in remote "+ + "machine %v: %s %s", err, stdout.String(), stderr.String()) + } } - if err := c.Start(cmd); err != nil { - return fmt.Errorf( - "Error chmodding script file to 0777 in remote "+ - "machine: %s", err) - } - - if err := cmd.Wait(); err != nil { - return fmt.Errorf( - "Error chmodding script file to 0777 in remote "+ - "machine %v: %s %s", err, stdout.String(), stderr.String()) - } - return nil } diff --git a/communicator/ssh/communicator_test.go b/communicator/ssh/communicator_test.go index 445fbebb9..d71044320 100644 --- a/communicator/ssh/communicator_test.go +++ b/communicator/ssh/communicator_test.go @@ -20,7 +20,7 @@ import ( "time" "github.com/hashicorp/terraform/communicator/remote" - "github.com/hashicorp/terraform/terraform" + "github.com/zclconf/go-cty/cty" "golang.org/x/crypto/ssh" ) @@ -99,6 +99,7 @@ func newMockLineServer(t *testing.T, signer ssh.Signer, pubKey string) string { t.Log("Accepted channel") go func(in <-chan *ssh.Request) { + defer channel.Close() for req := range in { // since this channel's requests are serviced serially, // this will block keepalive probes, and can simulate a @@ -112,8 +113,6 @@ func newMockLineServer(t *testing.T, signer ssh.Signer, pubKey string) string { } } 
}(requests) - - defer channel.Close() } conn.Close() }() @@ -125,20 +124,16 @@ func TestNew_Invalid(t *testing.T) { address := newMockLineServer(t, nil, testClientPublicKey) parts := strings.Split(address, ":") - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "ssh", - "user": "user", - "password": "i-am-invalid", - "host": parts[0], - "port": parts[1], - "timeout": "30s", - }, - }, - } + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": cty.StringVal("i-am-invalid"), + "host": cty.StringVal(parts[0]), + "port": cty.StringVal(parts[1]), + "timeout": cty.StringVal("30s"), + }) - c, err := New(r) + c, err := New(v) if err != nil { t.Fatalf("error creating communicator: %s", err) } @@ -150,19 +145,15 @@ func TestNew_Invalid(t *testing.T) { } func TestNew_InvalidHost(t *testing.T) { - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "ssh", - "user": "user", - "password": "i-am-invalid", - "port": "22", - "timeout": "30s", - }, - }, - } + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": cty.StringVal("i-am-invalid"), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + }) - _, err := New(r) + _, err := New(v) if err == nil { t.Fatal("should have had an error creating communicator") } @@ -172,20 +163,16 @@ func TestStart(t *testing.T) { address := newMockLineServer(t, nil, testClientPublicKey) parts := strings.Split(address, ":") - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "ssh", - "user": "user", - "password": "pass", - "host": parts[0], - "port": parts[1], - "timeout": "30s", - }, - }, - } + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": 
cty.StringVal("pass"), + "host": cty.StringVal(parts[0]), + "port": cty.StringVal(parts[1]), + "timeout": cty.StringVal("30s"), + }) - c, err := New(r) + c, err := New(v) if err != nil { t.Fatalf("error creating communicator: %s", err) } @@ -211,19 +198,15 @@ func TestKeepAlives(t *testing.T) { address := newMockLineServer(t, nil, testClientPublicKey) parts := strings.Split(address, ":") - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "ssh", - "user": "user", - "password": "pass", - "host": parts[0], - "port": parts[1], - }, - }, - } + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(parts[0]), + "port": cty.StringVal(parts[1]), + }) - c, err := New(r) + c, err := New(v) if err != nil { t.Fatalf("error creating communicator: %s", err) } @@ -261,19 +244,16 @@ func TestFailedKeepAlives(t *testing.T) { address := newMockLineServer(t, nil, testClientPublicKey) parts := strings.Split(address, ":") - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "ssh", - "user": "user", - "password": "pass", - "host": parts[0], - "port": parts[1], - }, - }, - } + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(parts[0]), + "port": cty.StringVal(parts[1]), + "timeout": cty.StringVal("30s"), + }) - c, err := New(r) + c, err := New(v) if err != nil { t.Fatalf("error creating communicator: %s", err) } @@ -296,20 +276,16 @@ func TestLostConnection(t *testing.T) { address := newMockLineServer(t, nil, testClientPublicKey) parts := strings.Split(address, ":") - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "ssh", - "user": "user", - "password": "pass", - "host": parts[0], 
- "port": parts[1], - "timeout": "30s", - }, - }, - } + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(parts[0]), + "port": cty.StringVal(parts[1]), + "timeout": cty.StringVal("30s"), + }) - c, err := New(r) + c, err := New(v) if err != nil { t.Fatalf("error creating communicator: %s", err) } @@ -586,19 +562,15 @@ func TestAccUploadFile(t *testing.T) { t.Skip() } - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "ssh", - "user": os.Getenv("USER"), - "host": "127.0.0.1", - "port": "22", - "timeout": "30s", - }, - }, - } + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal(os.Getenv("USER")), + "host": cty.StringVal("127.0.0.1"), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + }) - c, err := New(r) + c, err := New(v) if err != nil { t.Fatalf("error creating communicator: %s", err) } @@ -634,19 +606,15 @@ func TestAccHugeUploadFile(t *testing.T) { t.Skip() } - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "ssh", - "user": os.Getenv("USER"), - "host": "127.0.0.1", - "port": "22", - "timeout": "30s", - }, - }, - } + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "host": cty.StringVal("127.0.0.1"), + "user": cty.StringVal(os.Getenv("USER")), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + }) - c, err := New(r) + c, err := New(v) if err != nil { t.Fatalf("error creating communicator: %s", err) } @@ -706,16 +674,13 @@ func TestScriptPath(t *testing.T) { } for _, tc := range cases { - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "ssh", - "host": "127.0.0.1", - "script_path": tc.Input, - }, - }, - } - comm, err := New(r) + v := 
cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "host": cty.StringVal("127.0.0.1"), + "script_path": cty.StringVal(tc.Input), + }) + + comm, err := New(v) if err != nil { t.Fatalf("err: %s", err) } @@ -735,14 +700,10 @@ func TestScriptPath_randSeed(t *testing.T) { // Pre GH-4186 fix, this value was the deterministic start the pseudorandom // chain of unseeded math/rand values for Int31(). staticSeedPath := "/tmp/terraform_1298498081.sh" - c, err := New(&terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "ssh", - "host": "127.0.0.1", - }, - }, - }) + c, err := New(cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "host": cty.StringVal("127.0.0.1"), + })) if err != nil { t.Fatalf("err: %s", err) } @@ -752,34 +713,6 @@ func TestScriptPath_randSeed(t *testing.T) { } } -const testClientPrivateKey = `-----BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAxOgNXOJ/jrRDxBZTSk2X9otNy9zcpUmJr5ifDi5sy7j2ZiQS -beBt1Wf+tLNWis8Cyq06ttEvjjRuM75yucyD6GrqDTXVCSm4PeOIQeDhPhw26wYZ -O0h/mFgrAiCwaEl8AFXVBInOhVn/0nqgUpkwckh76bjTsNeifkiugK3cfJOuBdrU -ZGbgugJjmYo4Cmv7ECo1gCFT5N+BAjOji3z3N5ClBH5HaWC77jH7kTH0k5cZ+ZRQ -tG9EqLyvWlnzTAR/Yly0JInkOa16Ms1Au5sIJwEoJfHKsNVK06IlLob53nblwYW0 -H5gv1Kb/rS+nUkpPtA5YFShB7iZnPLPPv6qXSwIDAQABAoIBAC0UY1rMkB9/rbQK -2G6+bPgI1HrDydAdkeQdsOxyPH43jlG8GGwHYZ3l/S4pkLqewijcmACay6Rm5IP8 -Kg/XfquLLqJvnKJIZuHkYaGTdn3dv8T21Hf6FRwvs0j9auW1TSpWfDpZwmpNPIBX -irTeVXUUmynbIrvt4km/IhRbuYrbbb964CLYD1DCl3XssXxoRNvPpc5EtOuyDorA -5g1hvZR1FqbOAmOuNQMYJociMuWB8mCaHb+o1Sg4A65OLXxoKs0cuwInJ/n/R4Z3 -+GrV+x5ypBMxXgjjQtKMLEOujkvxs1cp34hkbhKMHHXxbMu5jl74YtGGsLLk90rq -ieZGIgECgYEA49OM9mMCrDoFUTZdJaSARA/MOXkdQgrqVTv9kUHee7oeMZZ6lS0i -bPU7g+Bq+UAN0qcw9x992eAElKjBA71Q5UbZYWh29BDMZd8bRJmwz4P6aSMoYLWI -Sr31caJU9LdmPFatarNeehjSJtlTuoZD9+NElnnUwNaTeOOo5UdhTQsCgYEA3UGm -QWoDUttFwK9oL2KL8M54Bx6EzNhnyk03WrqBbR7PJcPKnsF0R/0soQ+y0FW0r8RJ -TqG6ze5fUJII72B4GlMTQdP+BIvaKQttwWQTNIjbbv4NksF445gdVOO1xi9SvQ7k 
-uvMVxOb+1jL3HAFa3furWu2tJRDs6dhuaILLxsECgYEAhnhlKUBDYZhVbxvhWsh/ -lKymY/3ikQqUSX7BKa1xPiIalDY3YDllql4MpMgfG8L85asdMZ96ztB0o7H/Ss/B -IbLxt5bLLz+DBVXsaE82lyVU9h10RbCgI01/w3SHJHHjfBXFAcehKfvgfmGkE+IP -2A5ie1aphrCgFqh5FetNuQUCgYEAibL42I804FUtFR1VduAa/dRRqQSaW6528dWa -lLGsKRBalUNEEAeP6dmr89UEUVp1qEo94V0QGGe5FDi+rNPaC3AWdQqNdaDgNlkx -hoFU3oYqIuqj4ejc5rBd2N4a2+vJz3W8bokozDGC+iYf2mMRfUPKwj1XW9Er0OFs -3UhBsEECgYEAto/iJB7ZlCM7EyV9JW0tsEt83rbKMQ/Ex0ShbBIejej0Xx7bwx60 -tVgay+bzJnNkXu6J4XVI98A/WsdI2kW4hL0STYdHV5HVA1l87V4ZbvTF2Bx8a8RJ -OF3UjpMTWKqOprw9nAu5VuwNRVzORF8ER8rgGeaR2/gsSvIYFy9VXq8= ------END RSA PRIVATE KEY-----` - var testClientPublicKey = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDE6A1c4n+OtEPEFlNKTZf2i03L3NylSYmvmJ8OLmzLuPZmJBJt4G3VZ/60s1aKzwLKrTq20S+ONG4zvnK5zIPoauoNNdUJKbg944hB4OE+HDbrBhk7SH+YWCsCILBoSXwAVdUEic6FWf/SeqBSmTBySHvpuNOw16J+SK6Ardx8k64F2tRkZuC6AmOZijgKa/sQKjWAIVPk34ECM6OLfPc3kKUEfkdpYLvuMfuRMfSTlxn5lFC0b0SovK9aWfNMBH9iXLQkieQ5rXoyzUC7mwgnASgl8cqw1UrToiUuhvneduXBhbQfmC/Upv+tL6dSSk+0DlgVKEHuJmc8s8+/qpdL` func acceptUserPass(goodUser, goodPass string) func(ssh.ConnMetadata, []byte) (*ssh.Permissions, error) { diff --git a/communicator/ssh/password_test.go b/communicator/ssh/password_test.go index e513716d0..219669e4b 100644 --- a/communicator/ssh/password_test.go +++ b/communicator/ssh/password_test.go @@ -3,18 +3,8 @@ package ssh import ( "reflect" "testing" - - "golang.org/x/crypto/ssh" ) -func TestPasswordKeyboardInteractive_Impl(t *testing.T) { - var raw interface{} - raw = PasswordKeyboardInteractive("foo") - if _, ok := raw.(ssh.KeyboardInteractiveChallenge); !ok { - t.Fatal("PasswordKeyboardInteractive must implement KeyboardInteractiveChallenge") - } -} - func TestPasswordKeybardInteractive_Challenge(t *testing.T) { p := PasswordKeyboardInteractive("foo") result, err := p("foo", "bar", []string{"one", "two"}, nil) diff --git a/communicator/ssh/provisioner.go b/communicator/ssh/provisioner.go index b6fe80a4a..a3fa80c42 100644 --- 
a/communicator/ssh/provisioner.go +++ b/communicator/ssh/provisioner.go @@ -10,13 +10,13 @@ import ( "net" "os" "path/filepath" + "strconv" "strings" "time" "github.com/hashicorp/terraform/communicator/shared" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/mapstructure" sshagent "github.com/xanzy/ssh-agent" + "github.com/zclconf/go-cty/cty" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" "golang.org/x/crypto/ssh/knownhosts" @@ -29,54 +29,126 @@ const ( // DefaultPort is used if there is no port given DefaultPort = 22 - // DefaultScriptPath is used as the path to copy the file to - // for remote execution if not provided otherwise. - DefaultScriptPath = "/tmp/terraform_%RAND%.sh" + // DefaultUnixScriptPath is used as the path to copy the file to + // for remote execution on unix if not provided otherwise. + DefaultUnixScriptPath = "/tmp/terraform_%RAND%.sh" + // DefaultWindowsScriptPath is used as the path to copy the file to + // for remote execution on windows if not provided otherwise. + DefaultWindowsScriptPath = "C:/windows/temp/terraform_%RAND%.cmd" // DefaultTimeout is used if there is no timeout given DefaultTimeout = 5 * time.Minute + + // TargetPlatformUnix used for cleaner code, and is used if no target platform has been specified + TargetPlatformUnix = "unix" + //TargetPlatformWindows used for cleaner code + TargetPlatformWindows = "windows" ) // connectionInfo is decoded from the ConnInfo of the resource. These are the // only keys we look at. If a PrivateKey is given, that is used instead // of a password. 
type connectionInfo struct { - User string - Password string - PrivateKey string `mapstructure:"private_key"` - Certificate string `mapstructure:"certificate"` - Host string - HostKey string `mapstructure:"host_key"` - Port int - Agent bool - Timeout string - ScriptPath string `mapstructure:"script_path"` - TimeoutVal time.Duration `mapstructure:"-"` + User string + Password string + PrivateKey string + Certificate string + Host string + HostKey string + Port int + Agent bool + ScriptPath string + TargetPlatform string + Timeout string + TimeoutVal time.Duration - BastionUser string `mapstructure:"bastion_user"` - BastionPassword string `mapstructure:"bastion_password"` - BastionPrivateKey string `mapstructure:"bastion_private_key"` - BastionCertificate string `mapstructure:"bastion_certificate"` - BastionHost string `mapstructure:"bastion_host"` - BastionHostKey string `mapstructure:"bastion_host_key"` - BastionPort int `mapstructure:"bastion_port"` + BastionUser string + BastionPassword string + BastionPrivateKey string + BastionCertificate string + BastionHost string + BastionHostKey string + BastionPort int - AgentIdentity string `mapstructure:"agent_identity"` + AgentIdentity string } -// parseConnectionInfo is used to convert the ConnInfo of the InstanceState into -// a ConnectionInfo struct -func parseConnectionInfo(s *terraform.InstanceState) (*connectionInfo, error) { +// decodeConnInfo decodes the given cty.Value using the same behavior as the +// legacy mapstructure decoder in order to preserve as much of the existing +// logic as possible for compatibility. 
+func decodeConnInfo(v cty.Value) (*connectionInfo, error) { connInfo := &connectionInfo{} - decConf := &mapstructure.DecoderConfig{ - WeaklyTypedInput: true, - Result: connInfo, + if v.IsNull() { + return connInfo, nil } - dec, err := mapstructure.NewDecoder(decConf) + + for k, v := range v.AsValueMap() { + if v.IsNull() { + continue + } + + switch k { + case "user": + connInfo.User = v.AsString() + case "password": + connInfo.Password = v.AsString() + case "private_key": + connInfo.PrivateKey = v.AsString() + case "certificate": + connInfo.Certificate = v.AsString() + case "host": + connInfo.Host = v.AsString() + case "host_key": + connInfo.HostKey = v.AsString() + case "port": + p, err := strconv.Atoi(v.AsString()) + if err != nil { + return nil, err + } + connInfo.Port = p + case "agent": + connInfo.Agent = v.True() + case "script_path": + connInfo.ScriptPath = v.AsString() + case "target_platform": + connInfo.TargetPlatform = v.AsString() + case "timeout": + connInfo.Timeout = v.AsString() + case "bastion_user": + connInfo.BastionUser = v.AsString() + case "bastion_password": + connInfo.BastionPassword = v.AsString() + case "bastion_private_key": + connInfo.BastionPrivateKey = v.AsString() + case "bastion_certificate": + connInfo.BastionCertificate = v.AsString() + case "bastion_host": + connInfo.BastionHost = v.AsString() + case "bastion_host_key": + connInfo.BastionHostKey = v.AsString() + case "bastion_port": + p, err := strconv.Atoi(v.AsString()) + if err != nil { + return nil, err + } + connInfo.BastionPort = p + case "agent_identity": + connInfo.AgentIdentity = v.AsString() + } + } + return connInfo, nil +} + +// parseConnectionInfo is used to convert the raw configuration into the +// *connectionInfo struct. 
+func parseConnectionInfo(v cty.Value) (*connectionInfo, error) { + v, err := shared.ConnectionBlockSupersetSchema.CoerceValue(v) if err != nil { return nil, err } - if err := dec.Decode(s.Ephemeral.ConnInfo); err != nil { + + connInfo, err := decodeConnInfo(v) + if err != nil { return nil, err } @@ -85,7 +157,8 @@ func parseConnectionInfo(s *terraform.InstanceState) (*connectionInfo, error) { // // And if SSH_AUTH_SOCK is not set, there's no agent to connect to, so we // shouldn't try. - if s.Ephemeral.ConnInfo["agent"] == "" && os.Getenv("SSH_AUTH_SOCK") != "" { + agent := v.GetAttr("agent") + if agent.IsNull() && os.Getenv("SSH_AUTH_SOCK") != "" { connInfo.Agent = true } @@ -106,8 +179,19 @@ func parseConnectionInfo(s *terraform.InstanceState) (*connectionInfo, error) { if connInfo.Port == 0 { connInfo.Port = DefaultPort } - if connInfo.ScriptPath == "" { - connInfo.ScriptPath = DefaultScriptPath + // Set default targetPlatform to unix if it's empty + if connInfo.TargetPlatform == "" { + connInfo.TargetPlatform = TargetPlatformUnix + } else if connInfo.TargetPlatform != TargetPlatformUnix && connInfo.TargetPlatform != TargetPlatformWindows { + return nil, fmt.Errorf("target_platform for provisioner has to be either %s or %s", TargetPlatformUnix, TargetPlatformWindows) + } + // Choose an appropriate default script path based on the target platform. There is no single + // suitable default script path which works on both UNIX and Windows targets. 
+ if connInfo.ScriptPath == "" && connInfo.TargetPlatform == TargetPlatformUnix { + connInfo.ScriptPath = DefaultUnixScriptPath + } + if connInfo.ScriptPath == "" && connInfo.TargetPlatform == TargetPlatformWindows { + connInfo.ScriptPath = DefaultWindowsScriptPath } if connInfo.Timeout != "" { connInfo.TimeoutVal = safeDuration(connInfo.Timeout, DefaultTimeout) @@ -328,7 +412,7 @@ func readPrivateKey(pk string) (ssh.AuthMethod, error) { } func connectToAgent(connInfo *connectionInfo) (*sshAgent, error) { - if connInfo.Agent != true { + if !connInfo.Agent { // No agent configured return nil, nil } @@ -463,13 +547,6 @@ func (s *sshAgent) sortSigners(signers []ssh.Signer) { continue } } - - ss := []string{} - for _, signer := range signers { - pk := signer.PublicKey() - k := pk.(*agent.Key) - ss = append(ss, k.Comment) - } } func (s *sshAgent) Signers() ([]ssh.Signer, error) { diff --git a/communicator/ssh/provisioner_test.go b/communicator/ssh/provisioner_test.go index f8e0f77d8..ace46f49d 100644 --- a/communicator/ssh/provisioner_test.go +++ b/communicator/ssh/provisioner_test.go @@ -3,28 +3,23 @@ package ssh import ( "testing" - "github.com/hashicorp/terraform/terraform" + "github.com/zclconf/go-cty/cty" ) func TestProvisioner_connInfo(t *testing.T) { - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "ssh", - "user": "root", - "password": "supersecret", - "private_key": "someprivatekeycontents", - "certificate": "somecertificate", - "host": "127.0.0.1", - "port": "22", - "timeout": "30s", + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("root"), + "password": cty.StringVal("supersecret"), + "private_key": cty.StringVal("someprivatekeycontents"), + "certificate": cty.StringVal("somecertificate"), + "host": cty.StringVal("127.0.0.1"), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + "bastion_host": cty.StringVal("127.0.1.1"), + }) - 
"bastion_host": "127.0.1.1", - }, - }, - } - - conf, err := parseConnectionInfo(r) + conf, err := parseConnectionInfo(v) if err != nil { t.Fatalf("err: %v", err) } @@ -50,7 +45,10 @@ func TestProvisioner_connInfo(t *testing.T) { if conf.Timeout != "30s" { t.Fatalf("bad: %v", conf) } - if conf.ScriptPath != DefaultScriptPath { + if conf.ScriptPath != DefaultUnixScriptPath { + t.Fatalf("bad: %v", conf) + } + if conf.TargetPlatform != TargetPlatformUnix { t.Fatalf("bad: %v", conf) } if conf.BastionHost != "127.0.1.1" { @@ -71,24 +69,18 @@ func TestProvisioner_connInfo(t *testing.T) { } func TestProvisioner_connInfoIpv6(t *testing.T) { - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "ssh", - "user": "root", - "password": "supersecret", - "private_key": "someprivatekeycontents", - "certificate": "somecertificate", - "host": "::1", - "port": "22", - "timeout": "30s", + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("root"), + "password": cty.StringVal("supersecret"), + "private_key": cty.StringVal("someprivatekeycontents"), + "host": cty.StringVal("::1"), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + "bastion_host": cty.StringVal("::1"), + }) - "bastion_host": "::1", - }, - }, - } - - conf, err := parseConnectionInfo(r) + conf, err := parseConnectionInfo(v) if err != nil { t.Fatalf("err: %v", err) } @@ -103,22 +95,18 @@ func TestProvisioner_connInfoIpv6(t *testing.T) { } func TestProvisioner_connInfoHostname(t *testing.T) { - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "ssh", - "user": "root", - "password": "supersecret", - "private_key": "someprivatekeycontents", - "host": "example.com", - "port": "22", - "timeout": "30s", - "bastion_host": "example.com", - }, - }, - } + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": 
cty.StringVal("root"), + "password": cty.StringVal("supersecret"), + "private_key": cty.StringVal("someprivatekeycontents"), + "host": cty.StringVal("example.com"), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + "bastion_host": cty.StringVal("example.com"), + }) - conf, err := parseConnectionInfo(r) + conf, err := parseConnectionInfo(v) if err != nil { t.Fatalf("err: %v", err) } @@ -133,21 +121,16 @@ func TestProvisioner_connInfoHostname(t *testing.T) { } func TestProvisioner_connInfoEmptyHostname(t *testing.T) { - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "ssh", - "user": "root", - "password": "supersecret", - "private_key": "someprivatekeycontents", - "host": "", - "port": "22", - "timeout": "30s", - }, - }, - } + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "user": cty.StringVal("root"), + "password": cty.StringVal("supersecret"), + "private_key": cty.StringVal("someprivatekeycontents"), + "port": cty.StringVal("22"), + "timeout": cty.StringVal("30s"), + }) - _, err := parseConnectionInfo(r) + _, err := parseConnectionInfo(v) if err == nil { t.Fatalf("bad: should not allow empty host") } diff --git a/communicator/ssh/ssh_test.go b/communicator/ssh/ssh_test.go index 9cd10a0a3..c1086569f 100644 --- a/communicator/ssh/ssh_test.go +++ b/communicator/ssh/ssh_test.go @@ -16,7 +16,7 @@ import ( // verify that we can locate public key data func TestFindKeyData(t *testing.T) { - // setup a test directory + // set up a test directory td, err := ioutil.TempDir("", "ssh") if err != nil { t.Fatal(err) @@ -81,10 +81,10 @@ func generateSSHKey(t *testing.T, idFile string) ssh.PublicKey { } privFile, err := os.OpenFile(idFile, os.O_RDWR|os.O_CREATE, 0600) - defer privFile.Close() if err != nil { t.Fatal(err) } + defer privFile.Close() privPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)} if err := pem.Encode(privFile, 
privPEM); err != nil { t.Fatal(err) diff --git a/communicator/winrm/communicator.go b/communicator/winrm/communicator.go index 827344917..4f9f28838 100644 --- a/communicator/winrm/communicator.go +++ b/communicator/winrm/communicator.go @@ -10,9 +10,10 @@ import ( "time" "github.com/hashicorp/terraform/communicator/remote" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/provisioners" "github.com/masterzen/winrm" "github.com/packer-community/winrmcp/winrmcp" + "github.com/zclconf/go-cty/cty" ) // Communicator represents the WinRM communicator @@ -24,8 +25,8 @@ type Communicator struct { } // New creates a new communicator implementation over WinRM. -func New(s *terraform.InstanceState) (*Communicator, error) { - connInfo, err := parseConnectionInfo(s) +func New(v cty.Value) (*Communicator, error) { + connInfo, err := parseConnectionInfo(v) if err != nil { return nil, err } @@ -52,7 +53,7 @@ func New(s *terraform.InstanceState) (*Communicator, error) { } // Connect implementation of communicator.Communicator interface -func (c *Communicator) Connect(o terraform.UIOutput) error { +func (c *Communicator) Connect(o provisioners.UIOutput) error { // Set the client to nil since we'll (re)create it c.client = nil diff --git a/communicator/winrm/communicator_test.go b/communicator/winrm/communicator_test.go index f6a049932..bd8d2ecd5 100644 --- a/communicator/winrm/communicator_test.go +++ b/communicator/winrm/communicator_test.go @@ -9,7 +9,8 @@ import ( "github.com/dylanmei/winrmtest" "github.com/hashicorp/terraform/communicator/remote" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/communicator/shared" + "github.com/zclconf/go-cty/cty" ) func newMockWinRMServer(t *testing.T) *winrmtest.Remote { @@ -47,20 +48,16 @@ func TestStart(t *testing.T) { wrm := newMockWinRMServer(t) defer wrm.Close() - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": 
"winrm", - "user": "user", - "password": "pass", - "host": wrm.Host, - "port": strconv.Itoa(wrm.Port), - "timeout": "30s", - }, - }, - } + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(wrm.Host), + "port": cty.StringVal(strconv.Itoa(wrm.Port)), + "timeout": cty.StringVal("30s"), + }) - c, err := New(r) + c, err := New(v) if err != nil { t.Fatalf("error creating communicator: %s", err) } @@ -84,21 +81,16 @@ func TestStart(t *testing.T) { func TestUpload(t *testing.T) { wrm := newMockWinRMServer(t) defer wrm.Close() + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(wrm.Host), + "port": cty.StringVal(strconv.Itoa(wrm.Port)), + "timeout": cty.StringVal("30s"), + }) - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "winrm", - "user": "user", - "password": "pass", - "host": wrm.Host, - "port": strconv.Itoa(wrm.Port), - "timeout": "30s", - }, - }, - } - - c, err := New(r) + c, err := New(v) if err != nil { t.Fatalf("error creating communicator: %s", err) } @@ -131,15 +123,13 @@ func TestScriptPath(t *testing.T) { } for _, tc := range cases { - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "winrm", - "script_path": tc.Input, - }, - }, - } - comm, err := New(r) + v := cty.ObjectVal(map[string]cty.Value{ + "host": cty.StringVal(""), + "type": cty.StringVal("winrm"), + "script_path": cty.StringVal(tc.Input), + }) + + comm, err := New(v) if err != nil { t.Fatalf("err: %s", err) } @@ -158,21 +148,16 @@ func TestScriptPath(t *testing.T) { func TestNoTransportDecorator(t *testing.T) { wrm := newMockWinRMServer(t) defer wrm.Close() + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + 
"user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(wrm.Host), + "port": cty.StringVal(strconv.Itoa(wrm.Port)), + "timeout": cty.StringVal("30s"), + }) - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "winrm", - "user": "user", - "password": "pass", - "host": wrm.Host, - "port": strconv.Itoa(wrm.Port), - "timeout": "30s", - }, - }, - } - - c, err := New(r) + c, err := New(v) if err != nil { t.Fatalf("error creating communicator: %s", err) } @@ -192,21 +177,17 @@ func TestTransportDecorator(t *testing.T) { wrm := newMockWinRMServer(t) defer wrm.Close() - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "winrm", - "user": "user", - "password": "pass", - "host": wrm.Host, - "port": strconv.Itoa(wrm.Port), - "use_ntlm": "true", - "timeout": "30s", - }, - }, - } + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("user"), + "password": cty.StringVal("pass"), + "host": cty.StringVal(wrm.Host), + "port": cty.StringVal(strconv.Itoa(wrm.Port)), + "use_ntlm": cty.StringVal("true"), + "timeout": cty.StringVal("30s"), + }) - c, err := New(r) + c, err := New(v) if err != nil { t.Fatalf("error creating communicator: %s", err) } @@ -226,7 +207,7 @@ func TestScriptPath_randSeed(t *testing.T) { // Pre GH-4186 fix, this value was the deterministic start the pseudorandom // chain of unseeded math/rand values for Int31(). 
staticSeedPath := "C:/Temp/terraform_1298498081.cmd" - c, err := New(&terraform.InstanceState{}) + c, err := New(cty.NullVal(shared.ConnectionBlockSupersetSchema.ImpliedType())) if err != nil { t.Fatalf("err: %s", err) } diff --git a/communicator/winrm/provisioner.go b/communicator/winrm/provisioner.go index 5cef1309d..7a71fe92f 100644 --- a/communicator/winrm/provisioner.go +++ b/communicator/winrm/provisioner.go @@ -4,12 +4,12 @@ import ( "fmt" "log" "path/filepath" + "strconv" "strings" "time" "github.com/hashicorp/terraform/communicator/shared" - "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/mapstructure" + "github.com/zclconf/go-cty/cty" ) const ( @@ -47,22 +47,62 @@ type connectionInfo struct { TimeoutVal time.Duration `mapstructure:"-"` } +// decodeConnInfo decodes the given cty.Value using the same behavior as the +// legacy mapstructure decoder in order to preserve as much of the existing +// logic as possible for compatibility. +func decodeConnInfo(v cty.Value) (*connectionInfo, error) { + connInfo := &connectionInfo{} + if v.IsNull() { + return connInfo, nil + } + + for k, v := range v.AsValueMap() { + if v.IsNull() { + continue + } + + switch k { + case "user": + connInfo.User = v.AsString() + case "password": + connInfo.Password = v.AsString() + case "host": + connInfo.Host = v.AsString() + case "port": + p, err := strconv.Atoi(v.AsString()) + if err != nil { + return nil, err + } + connInfo.Port = p + case "https": + connInfo.HTTPS = v.True() + case "insecure": + connInfo.Insecure = v.True() + case "use_ntlm": + connInfo.NTLM = v.True() + case "cacert": + connInfo.CACert = v.AsString() + case "script_path": + connInfo.ScriptPath = v.AsString() + case "timeout": + connInfo.Timeout = v.AsString() + } + } + return connInfo, nil +} + // parseConnectionInfo is used to convert the ConnInfo of the InstanceState into // a ConnectionInfo struct -func parseConnectionInfo(s *terraform.InstanceState) (*connectionInfo, error) { - connInfo = 
&connectionInfo{} - decConf := &mapstructure.DecoderConfig{ - WeaklyTypedInput: true, - Result: connInfo, - } - dec, err := mapstructure.NewDecoder(decConf) +func parseConnectionInfo(v cty.Value) (*connectionInfo, error) { + v, err := shared.ConnectionBlockSupersetSchema.CoerceValue(v) if err != nil { return nil, err } - if err := dec.Decode(s.Ephemeral.ConnInfo); err != nil { + + connInfo, err := decodeConnInfo(v) + if err != nil { return nil, err } - // Check on script paths which point to the default Windows TEMP folder because files // which are put in there very early in the boot process could get cleaned/deleted // before you had the change to execute them. diff --git a/communicator/winrm/provisioner_test.go b/communicator/winrm/provisioner_test.go index fbc45c34b..50718aa86 100644 --- a/communicator/winrm/provisioner_test.go +++ b/communicator/winrm/provisioner_test.go @@ -3,23 +3,19 @@ package winrm import ( "testing" - "github.com/hashicorp/terraform/terraform" + "github.com/zclconf/go-cty/cty" ) func TestProvisioner_defaultHTTPSPort(t *testing.T) { - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "winrm", - "user": "Administrator", - "password": "supersecret", - "host": "127.0.0.1", - "https": "true", - }, - }, - } + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("Administrator"), + "password": cty.StringVal("supersecret"), + "host": cty.StringVal("127.0.0.1"), + "https": cty.True, + }) - conf, err := parseConnectionInfo(r) + conf, err := parseConnectionInfo(v) if err != nil { t.Fatalf("err: %v", err) } @@ -32,22 +28,18 @@ func TestProvisioner_defaultHTTPSPort(t *testing.T) { } func TestProvisioner_connInfo(t *testing.T) { - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "winrm", - "user": "Administrator", - "password": "supersecret", - "host": "127.0.0.1", - "port": "5985", - 
"https": "true", - "use_ntlm": "true", - "timeout": "30s", - }, - }, - } + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("Administrator"), + "password": cty.StringVal("supersecret"), + "host": cty.StringVal("127.0.0.1"), + "port": cty.StringVal("5985"), + "https": cty.True, + "use_ntlm": cty.True, + "timeout": cty.StringVal("30s"), + }) - conf, err := parseConnectionInfo(r) + conf, err := parseConnectionInfo(v) if err != nil { t.Fatalf("err: %v", err) } @@ -100,23 +92,18 @@ CqDUFjhydXxYRsxXBBrEiLOE5BdtJR1sH/QHxIJe23C9iHI2nS1NbLziNEApLwC4 GnSud83VUo9G9w== -----END CERTIFICATE----- ` + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("Administrator"), + "password": cty.StringVal("supersecret"), + "host": cty.StringVal("127.0.0.1"), + "port": cty.StringVal("5985"), + "https": cty.True, + "timeout": cty.StringVal("30s"), + "cacert": cty.StringVal(caCert), + }) - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "winrm", - "user": "Administrator", - "password": "supersecret", - "host": "127.0.0.1", - "port": "5985", - "https": "true", - "timeout": "30s", - "cacert": caCert, - }, - }, - } - - conf, err := parseConnectionInfo(r) + conf, err := parseConnectionInfo(v) if err != nil { t.Fatalf("err: %v", err) } @@ -148,21 +135,17 @@ GnSud83VUo9G9w== } func TestProvisioner_connInfoIpv6(t *testing.T) { - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "winrm", - "user": "Administrator", - "password": "supersecret", - "host": "::1", - "port": "5985", - "https": "true", - "timeout": "30s", - }, - }, - } + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("Administrator"), + "password": cty.StringVal("supersecret"), + "host": cty.StringVal("::1"), + "port": cty.StringVal("5985"), + "https": cty.True, + 
"timeout": cty.StringVal("30s"), + }) - conf, err := parseConnectionInfo(r) + conf, err := parseConnectionInfo(v) if err != nil { t.Fatalf("err: %v", err) } @@ -191,21 +174,17 @@ func TestProvisioner_connInfoIpv6(t *testing.T) { } func TestProvisioner_connInfoHostname(t *testing.T) { - r := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "type": "winrm", - "user": "Administrator", - "password": "supersecret", - "host": "example.com", - "port": "5985", - "https": "true", - "timeout": "30s", - }, - }, - } + v := cty.ObjectVal(map[string]cty.Value{ + "type": cty.StringVal("winrm"), + "user": cty.StringVal("Administrator"), + "password": cty.StringVal("supersecret"), + "host": cty.StringVal("example.com"), + "port": cty.StringVal("5985"), + "https": cty.True, + "timeout": cty.StringVal("30s"), + }) - conf, err := parseConnectionInfo(r) + conf, err := parseConnectionInfo(v) if err != nil { t.Fatalf("err: %v", err) } @@ -235,38 +214,26 @@ func TestProvisioner_connInfoHostname(t *testing.T) { func TestProvisioner_formatDuration(t *testing.T) { cases := map[string]struct { - InstanceState *terraform.InstanceState - Result string + Config map[string]cty.Value + Result string }{ "testSeconds": { - InstanceState: &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "timeout": "90s", - }, - }, + Config: map[string]cty.Value{ + "timeout": cty.StringVal("90s"), }, Result: "PT1M30S", }, "testMinutes": { - InstanceState: &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "timeout": "5m", - }, - }, + Config: map[string]cty.Value{ + "timeout": cty.StringVal("5m"), }, Result: "PT5M", }, "testHours": { - InstanceState: &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: map[string]string{ - "timeout": "1h", - }, - }, + Config: map[string]cty.Value{ + "timeout": cty.StringVal("1h"), }, Result: "PT1H", @@ -274,7 +241,10 @@ 
func TestProvisioner_formatDuration(t *testing.T) { } for name, tc := range cases { - conf, err := parseConnectionInfo(tc.InstanceState) + // host is required in the schema + tc.Config["host"] = cty.StringVal("") + + conf, err := parseConnectionInfo(cty.ObjectVal(tc.Config)) if err != nil { t.Fatalf("err: %v", err) } diff --git a/configs/compat_shim.go b/configs/compat_shim.go index 4c6c1b75e..47f512b50 100644 --- a/configs/compat_shim.go +++ b/configs/compat_shim.go @@ -147,22 +147,81 @@ func warnForDeprecatedInterpolationsInExpr(expr hcl.Expression) hcl.Diagnostics return nil } - return hclsyntax.VisitAll(node, func(n hclsyntax.Node) hcl.Diagnostics { - e, ok := n.(*hclsyntax.TemplateWrapExpr) - if !ok { - // We're only interested in TemplateWrapExpr, because that's how - // the HCL native syntax parser represents the case of a template - // that consists entirely of a single interpolation expression, which - // is therefore subject to the special case of passing through the - // inner value without conversion to string. - return nil - } - - return hcl.Diagnostics{&hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: "Interpolation-only expressions are deprecated", - Detail: "Terraform 0.11 and earlier required all non-constant expressions to be provided via interpolation syntax, but this pattern is now deprecated. To silence this warning, remove the \"${ sequence from the start and the }\" sequence from the end of this expression, leaving just the inner expression.\n\nTemplate interpolation syntax is still used to construct strings from expressions when the template includes multiple interpolation sequences or a mixture of literal strings and interpolations. 
This deprecation applies only to templates that consist entirely of a single interpolation sequence.", - Subject: e.Range().Ptr(), - }} - }) + walker := warnForDeprecatedInterpolationsWalker{ + // create some capacity so that we can deal with simple expressions + // without any further allocation during our walk. + contextStack: make([]warnForDeprecatedInterpolationsContext, 0, 16), + } + return hclsyntax.Walk(node, &walker) +} + +// warnForDeprecatedInterpolationsWalker is an implementation of +// hclsyntax.Walker that we use to generate deprecation warnings for template +// expressions that consist entirely of a single interpolation directive. +// That's always redundant in Terraform v0.12 and later, but tends to show up +// when people work from examples written for Terraform v0.11 or earlier. +type warnForDeprecatedInterpolationsWalker struct { + contextStack []warnForDeprecatedInterpolationsContext +} + +var _ hclsyntax.Walker = (*warnForDeprecatedInterpolationsWalker)(nil) + +type warnForDeprecatedInterpolationsContext int + +const ( + warnForDeprecatedInterpolationsNormal warnForDeprecatedInterpolationsContext = 0 + warnForDeprecatedInterpolationsObjKey warnForDeprecatedInterpolationsContext = 1 +) + +func (w *warnForDeprecatedInterpolationsWalker) Enter(node hclsyntax.Node) hcl.Diagnostics { + var diags hcl.Diagnostics + + context := warnForDeprecatedInterpolationsNormal + switch node := node.(type) { + case *hclsyntax.ObjectConsKeyExpr: + context = warnForDeprecatedInterpolationsObjKey + case *hclsyntax.TemplateWrapExpr: + // hclsyntax.TemplateWrapExpr is a special node type used by HCL only + // for the situation where a template is just a single interpolation, + // so we don't need to do anything further to distinguish that + // situation. ("normal" templates are *hclsyntax.TemplateExpr.) 
+ + const summary = "Interpolation-only expressions are deprecated" + switch w.currentContext() { + case warnForDeprecatedInterpolationsObjKey: + // This case requires a different resolution in order to retain + // the same meaning, so we have a different detail message for + // it. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: summary, + Detail: "Terraform 0.11 and earlier required all non-constant expressions to be provided via interpolation syntax, but this pattern is now deprecated.\n\nTo silence this warning, replace the \"${ opening sequence and the }\" closing sequence with opening and closing parentheses respectively. Parentheses are needed here to mark this as an expression to be evaluated, rather than as a literal string key.\n\nTemplate interpolation syntax is still used to construct strings from expressions when the template includes multiple interpolation sequences or a mixture of literal strings and interpolations. This deprecation applies only to templates that consist entirely of a single interpolation sequence.", + Subject: node.Range().Ptr(), + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: summary, + Detail: "Terraform 0.11 and earlier required all non-constant expressions to be provided via interpolation syntax, but this pattern is now deprecated. To silence this warning, remove the \"${ sequence from the start and the }\" sequence from the end of this expression, leaving just the inner expression.\n\nTemplate interpolation syntax is still used to construct strings from expressions when the template includes multiple interpolation sequences or a mixture of literal strings and interpolations. This deprecation applies only to templates that consist entirely of a single interpolation sequence.", + Subject: node.Range().Ptr(), + }) + } + } + + // Note the context of the current node for when we potentially visit + // child nodes. 
+ w.contextStack = append(w.contextStack, context) + return diags +} + +func (w *warnForDeprecatedInterpolationsWalker) Exit(node hclsyntax.Node) hcl.Diagnostics { + w.contextStack = w.contextStack[:len(w.contextStack)-1] + return nil +} + +func (w *warnForDeprecatedInterpolationsWalker) currentContext() warnForDeprecatedInterpolationsContext { + if len(w.contextStack) == 0 { + return warnForDeprecatedInterpolationsNormal + } + return w.contextStack[len(w.contextStack)-1] } diff --git a/configs/compat_shim_test.go b/configs/compat_shim_test.go new file mode 100644 index 000000000..f6068bce9 --- /dev/null +++ b/configs/compat_shim_test.go @@ -0,0 +1,61 @@ +package configs + +import ( + "strings" + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +func TestWarnForDeprecatedInterpolationsInExpr(t *testing.T) { + tests := []struct { + Expr string + WantSubstr string + }{ + { + `"${foo}"`, + "leaving just the inner expression", + }, + { + `{"${foo}" = 1}`, + // Special message for object key expressions, because just + // removing the interpolation markers would change the meaning + // in that context. + "opening and closing parentheses respectively", + }, + { + `{upper("${foo}") = 1}`, + // But no special message if the template is just descended from an + // object key, because the special interpretation applies only to + // a naked reference in the object key position. 
+ "leaving just the inner expression", + }, + } + + for _, test := range tests { + t.Run(test.Expr, func(t *testing.T) { + expr, diags := hclsyntax.ParseExpression([]byte(test.Expr), "", hcl.InitialPos) + if diags.HasErrors() { + t.Fatalf("parse error: %s", diags.Error()) + } + + diags = warnForDeprecatedInterpolationsInExpr(expr) + if !diagWarningsContainSubstring(diags, test.WantSubstr) { + t.Errorf("wrong warning message\nwant detail substring: %s\ngot: %s", test.WantSubstr, diags.Error()) + } + }) + } +} + +func diagWarningsContainSubstring(diags hcl.Diagnostics, want string) bool { + for _, diag := range diags { + if diag.Severity != hcl.DiagWarning { + continue + } + if strings.Contains(diag.Detail, want) { + return true + } + } + return false +} diff --git a/configs/config_build.go b/configs/config_build.go index c38a67926..345c67814 100644 --- a/configs/config_build.go +++ b/configs/config_build.go @@ -22,6 +22,9 @@ func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) { } cfg.Root = cfg // Root module is self-referential. cfg.Children, diags = buildChildModules(cfg, walker) + + diags = append(diags, validateProviderConfigs(nil, cfg, false)...) + return cfg, diags } @@ -78,6 +81,15 @@ func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, child.Children, modDiags = buildChildModules(child, walker) diags = append(diags, modDiags...) 
+ if mod.Backend != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Backend configuration ignored", + Detail: "Any selected backend applies to the entire configuration, so Terraform expects provider configurations only in the root module.\n\nThis is a warning rather than an error because it's sometimes convenient to temporarily call a root module as a child module for testing purposes, but this backend configuration block will have no effect.", + Subject: mod.Backend.DeclRange.Ptr(), + }) + } + ret[call.Name] = child } diff --git a/configs/config_build_test.go b/configs/config_build_test.go index 1092558b0..a977e432b 100644 --- a/configs/config_build_test.go +++ b/configs/config_build_test.go @@ -2,6 +2,7 @@ package configs import ( "fmt" + "io/ioutil" "path/filepath" "reflect" "sort" @@ -114,3 +115,167 @@ func TestBuildConfigDiags(t *testing.T) { t.Fatalf("wrong result\ngot: %swant: %s", spew.Sdump(got), spew.Sdump(want)) } } + +func TestBuildConfigChildModuleBackend(t *testing.T) { + parser := NewParser(nil) + mod, diags := parser.LoadConfigDir("testdata/nested-backend-warning") + assertNoDiagnostics(t, diags) + if mod == nil { + t.Fatal("got nil root module; want non-nil") + } + + cfg, diags := BuildConfig(mod, ModuleWalkerFunc( + func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + // For the sake of this test we're going to just treat our + // SourceAddr as a path relative to our fixture directory. + // A "real" implementation of ModuleWalker should accept the + // various different source address syntaxes Terraform supports. 
+ sourcePath := filepath.Join("testdata/nested-backend-warning", req.SourceAddr) + + mod, diags := parser.LoadConfigDir(sourcePath) + version, _ := version.NewVersion("1.0.0") + return mod, version, diags + }, + )) + + assertDiagnosticSummary(t, diags, "Backend configuration ignored") + + // we should still have module structure loaded + var got []string + cfg.DeepEach(func(c *Config) { + got = append(got, fmt.Sprintf("%s %s", strings.Join(c.Path, "."), c.Version)) + }) + sort.Strings(got) + want := []string{ + " ", + "child 1.0.0", + } + + if !reflect.DeepEqual(got, want) { + t.Fatalf("wrong result\ngot: %swant: %s", spew.Sdump(got), spew.Sdump(want)) + } +} + +func TestBuildConfigInvalidModules(t *testing.T) { + testDir := "testdata/config-diagnostics" + dirs, err := ioutil.ReadDir(testDir) + if err != nil { + t.Fatal(err) + } + + for _, info := range dirs { + name := info.Name() + t.Run(name, func(t *testing.T) { + parser := NewParser(nil) + path := filepath.Join(testDir, name) + + mod, diags := parser.LoadConfigDir(path) + if diags.HasErrors() { + // these tests should only trigger errors that are caught in + // the config loader. + t.Errorf("error loading config dir") + for _, diag := range diags { + t.Logf("- %s", diag) + } + } + + readDiags := func(data []byte, _ error) []string { + var expected []string + for _, s := range strings.Split(string(data), "\n") { + msg := strings.TrimSpace(s) + msg = strings.ReplaceAll(msg, `\n`, "\n") + if msg != "" { + expected = append(expected, msg) + } + } + return expected + } + + // Load expected errors and warnings. + // Each line in the file is matched as a substring against the + // diagnostic outputs. + // Capturing part of the path and source range in the message lets + // us also ensure the diagnostic is being attributed to the + // expected location in the source, but is not required. + // The literal characters `\n` are replaced with newlines, but + // otherwise the string is unchanged. 
+ expectedErrs := readDiags(ioutil.ReadFile(filepath.Join(testDir, name, "errors"))) + expectedWarnings := readDiags(ioutil.ReadFile(filepath.Join(testDir, name, "warnings"))) + + _, buildDiags := BuildConfig(mod, ModuleWalkerFunc( + func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + // for simplicity, these tests will treat all source + // addresses as relative to the root module + sourcePath := filepath.Join(path, req.SourceAddr) + mod, diags := parser.LoadConfigDir(sourcePath) + version, _ := version.NewVersion("1.0.0") + return mod, version, diags + }, + )) + + // we can make this less repetitive later if we want + for _, msg := range expectedErrs { + found := false + for _, diag := range buildDiags { + if diag.Severity == hcl.DiagError && strings.Contains(diag.Error(), msg) { + found = true + break + } + } + + if !found { + t.Errorf("Expected error diagnostic containing %q", msg) + } + } + + for _, diag := range buildDiags { + if diag.Severity != hcl.DiagError { + continue + } + found := false + for _, msg := range expectedErrs { + if strings.Contains(diag.Error(), msg) { + found = true + break + } + } + + if !found { + t.Errorf("Unexpected error: %q", diag) + } + } + + for _, msg := range expectedWarnings { + found := false + for _, diag := range buildDiags { + if diag.Severity == hcl.DiagWarning && strings.Contains(diag.Error(), msg) { + found = true + break + } + } + + if !found { + t.Errorf("Expected warning diagnostic containing %q", msg) + } + } + + for _, diag := range buildDiags { + if diag.Severity != hcl.DiagWarning { + continue + } + found := false + for _, msg := range expectedWarnings { + if strings.Contains(diag.Error(), msg) { + found = true + break + } + } + + if !found { + t.Errorf("Unexpected warning: %q", diag) + } + } + + }) + } +} diff --git a/configs/configload/getter.go b/configs/configload/getter.go deleted file mode 100644 index d0c4567b6..000000000 --- a/configs/configload/getter.go +++ /dev/null @@ -1,168 +0,0 
@@ -package configload - -import ( - "fmt" - "log" - "os" - "path/filepath" - - cleanhttp "github.com/hashicorp/go-cleanhttp" - getter "github.com/hashicorp/go-getter" -) - -// We configure our own go-getter detector and getter sets here, because -// the set of sources we support is part of Terraform's documentation and -// so we don't want any new sources introduced in go-getter to sneak in here -// and work even though they aren't documented. This also insulates us from -// any meddling that might be done by other go-getter callers linked into our -// executable. - -var goGetterDetectors = []getter.Detector{ - new(getter.GitHubDetector), - new(getter.GitDetector), - new(getter.BitBucketDetector), - new(getter.GCSDetector), - new(getter.S3Detector), - new(getter.FileDetector), -} - -var goGetterNoDetectors = []getter.Detector{} - -var goGetterDecompressors = map[string]getter.Decompressor{ - "bz2": new(getter.Bzip2Decompressor), - "gz": new(getter.GzipDecompressor), - "xz": new(getter.XzDecompressor), - "zip": new(getter.ZipDecompressor), - - "tar.bz2": new(getter.TarBzip2Decompressor), - "tar.tbz2": new(getter.TarBzip2Decompressor), - - "tar.gz": new(getter.TarGzipDecompressor), - "tgz": new(getter.TarGzipDecompressor), - - "tar.xz": new(getter.TarXzDecompressor), - "txz": new(getter.TarXzDecompressor), -} - -var goGetterGetters = map[string]getter.Getter{ - "file": new(getter.FileGetter), - "gcs": new(getter.GCSGetter), - "git": new(getter.GitGetter), - "hg": new(getter.HgGetter), - "s3": new(getter.S3Getter), - "http": getterHTTPGetter, - "https": getterHTTPGetter, -} - -var getterHTTPClient = cleanhttp.DefaultClient() - -var getterHTTPGetter = &getter.HttpGetter{ - Client: getterHTTPClient, - Netrc: true, -} - -// A reusingGetter is a helper for the module installer that remembers -// the final resolved addresses of all of the sources it has already been -// asked to install, and will copy from a prior installation directory if -// it has the same resolved 
source address. -// -// The keys in a reusingGetter are resolved and trimmed source addresses -// (with a scheme always present, and without any "subdir" component), -// and the values are the paths where each source was previously installed. -type reusingGetter map[string]string - -// getWithGoGetter retrieves the package referenced in the given address -// into the installation path and then returns the full path to any subdir -// indicated in the address. -// -// The errors returned by this function are those surfaced by the underlying -// go-getter library, which have very inconsistent quality as -// end-user-actionable error messages. At this time we do not have any -// reasonable way to improve these error messages at this layer because -// the underlying errors are not separatelyr recognizable. -func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) { - packageAddr, subDir := splitAddrSubdir(addr) - - log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath) - - realAddr, err := getter.Detect(packageAddr, instPath, goGetterDetectors) - if err != nil { - return "", err - } - - var realSubDir string - realAddr, realSubDir = splitAddrSubdir(realAddr) - if realSubDir != "" { - subDir = filepath.Join(realSubDir, subDir) - } - - if realAddr != packageAddr { - log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr) - } - - if prevDir, exists := g[realAddr]; exists { - log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath) - err := os.Mkdir(instPath, os.ModePerm) - if err != nil { - return "", fmt.Errorf("failed to create directory %s: %s", instPath, err) - } - err = copyDir(instPath, prevDir) - if err != nil { - return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err) - } - } else { - log.Printf("[TRACE] fetching %q to %q", realAddr, instPath) - client := getter.Client{ - Src: realAddr, - Dst: instPath, - Pwd: instPath, - - Mode: getter.ClientModeDir, - - 
Detectors: goGetterNoDetectors, // we already did detection above - Decompressors: goGetterDecompressors, - Getters: goGetterGetters, - } - err = client.Get() - if err != nil { - return "", err - } - // Remember where we installed this so we might reuse this directory - // on subsequent calls to avoid re-downloading. - g[realAddr] = instPath - } - - // Our subDir string can contain wildcards until this point, so that - // e.g. a subDir of * can expand to one top-level directory in a .tar.gz - // archive. Now that we've expanded the archive successfully we must - // resolve that into a concrete path. - var finalDir string - if subDir != "" { - finalDir, err = getter.SubdirGlob(instPath, subDir) - log.Printf("[TRACE] expanded %q to %q", subDir, finalDir) - if err != nil { - return "", err - } - } else { - finalDir = instPath - } - - // If we got this far then we have apparently succeeded in downloading - // the requested object! - return filepath.Clean(finalDir), nil -} - -// splitAddrSubdir splits the given address (which is assumed to be a -// registry address or go-getter-style address) into a package portion -// and a sub-directory portion. -// -// The package portion defines what should be downloaded and then the -// sub-directory portion, if present, specifies a sub-directory within -// the downloaded object (an archive, VCS repository, etc) that contains -// the module's configuration files. -// -// The subDir portion will be returned as empty if no subdir separator -// ("//") is present in the address. 
-func splitAddrSubdir(addr string) (packageAddr, subDir string) { - return getter.SourceDirSubdir(addr) -} diff --git a/configs/configload/loader_load.go b/configs/configload/loader_load.go index eab38495c..e3c9bdca6 100644 --- a/configs/configload/loader_load.go +++ b/configs/configload/loader_load.go @@ -100,83 +100,5 @@ func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module, } } - // The providers associated with expanding modules must be present in the proxy/passed providers - // block. Guarding here for accessing the module call just in case. - if mc, exists := req.Parent.Module.ModuleCalls[req.Name]; exists { - var validateDiags hcl.Diagnostics - validateDiags = validateProviderConfigs(mc, mod, req.Parent, validateDiags) - diags = append(diags, validateDiags...) - } return mod, record.Version, diags } - -func validateProviderConfigs(mc *configs.ModuleCall, mod *configs.Module, parent *configs.Config, diags hcl.Diagnostics) hcl.Diagnostics { - if mc.Count != nil || mc.ForEach != nil || mc.DependsOn != nil { - for key, pc := range mod.ProviderConfigs { - // Use these to track if a provider is configured (not allowed), - // or if we've found its matching proxy - var isConfigured bool - var foundMatchingProxy bool - - // Validate the config against an empty schema to see if it's empty. - _, pcConfigDiags := pc.Config.Content(&hcl.BodySchema{}) - if pcConfigDiags.HasErrors() || pc.Version.Required != nil { - isConfigured = true - } - - // If it is empty or only has an alias, - // does this provider exist in our proxy configs? 
- for _, r := range mc.Providers { - // Must match on name and Alias - if pc.Name == r.InChild.Name && pc.Alias == r.InChild.Alias { - foundMatchingProxy = true - break - } - } - if isConfigured || !foundMatchingProxy { - if mc.Count != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Module does not support count", - Detail: fmt.Sprintf(moduleProviderError, mc.Name, "count", key, pc.NameRange), - Subject: mc.Count.Range().Ptr(), - }) - } - if mc.ForEach != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Module does not support for_each", - Detail: fmt.Sprintf(moduleProviderError, mc.Name, "for_each", key, pc.NameRange), - Subject: mc.ForEach.Range().Ptr(), - }) - } - if mc.DependsOn != nil { - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Module does not support depends_on", - Detail: fmt.Sprintf(moduleProviderError, mc.Name, "depends_on", key, pc.NameRange), - Subject: mc.SourceAddrRange.Ptr(), - }) - } - } - } - } - // If this module has further parents, go through them recursively - if !parent.Path.IsRoot() { - // Use the path to get the name so we can look it up in the parent module calls - path := parent.Path - name := path[len(path)-1] - // This parent's module call, so we can check for count/for_each here, - // guarding with exists just in case. We pass the diags through to the recursive - // call so they will accumulate if needed. - if mc, exists := parent.Parent.Module.ModuleCalls[name]; exists { - return validateProviderConfigs(mc, mod, parent.Parent, diags) - } - } - - return diags -} - -var moduleProviderError = `Module "%s" cannot be used with %s because it contains a nested provider configuration for "%s", at %s. 
- -This module can be made compatible with %[2]s by changing it to receive all of its provider configurations from the calling module, by using the "providers" argument in the calling module block.` diff --git a/configs/configload/loader_load_test.go b/configs/configload/loader_load_test.go index b7f396cf2..845b227a3 100644 --- a/configs/configload/loader_load_test.go +++ b/configs/configload/loader_load_test.go @@ -1,7 +1,6 @@ package configload import ( - "fmt" "path/filepath" "reflect" "sort" @@ -81,65 +80,3 @@ func TestLoaderLoadConfig_addVersion(t *testing.T) { t.Fatalf("wrong error\ngot:\n%s\n\nwant: containing %q", got, want) } } - -func TestLoaderLoadConfig_moduleExpand(t *testing.T) { - // We do not allow providers to be configured in expanding modules - // In addition, if a provider is present but an empty block, it is allowed, - // but IFF a provider is passed through the module call - paths := []string{"provider-configured", "no-provider-passed", "nested-provider", "more-nested-provider"} - for _, p := range paths { - fixtureDir := filepath.Clean(fmt.Sprintf("testdata/expand-modules/%s", p)) - loader, err := NewLoader(&Config{ - ModulesDir: filepath.Join(fixtureDir, ".terraform/modules"), - }) - if err != nil { - t.Fatalf("unexpected error from NewLoader at path %s: %s", p, err) - } - - _, diags := loader.LoadConfig(fixtureDir) - if !diags.HasErrors() { - t.Fatalf("success; want error at path %s", p) - } - got := diags.Error() - want := "Module does not support count" - if !strings.Contains(got, want) { - t.Fatalf("wrong error at path %s \ngot:\n%s\n\nwant: containing %q", p, got, want) - } - } -} - -func TestLoaderLoadConfig_moduleExpandValid(t *testing.T) { - // This tests for when valid configs are passing a provider through as a proxy, - // either with or without an alias present. 
- fixtureDir := filepath.Clean("testdata/expand-modules/valid") - loader, err := NewLoader(&Config{ - ModulesDir: filepath.Join(fixtureDir, ".terraform/modules"), - }) - if err != nil { - t.Fatalf("unexpected error from NewLoader: %s", err) - } - - _, diags := loader.LoadConfig(fixtureDir) - assertNoDiagnostics(t, diags) -} - -func TestLoaderLoadConfig_moduleDependsOnProviders(t *testing.T) { - // We do not allow providers to be configured in module using depends_on. - fixtureDir := filepath.Clean("testdata/module-depends-on") - loader, err := NewLoader(&Config{ - ModulesDir: filepath.Join(fixtureDir, ".terraform/modules"), - }) - if err != nil { - t.Fatalf("unexpected error from NewLoader: %s", err) - } - - _, diags := loader.LoadConfig(fixtureDir) - if !diags.HasErrors() { - t.Fatal("success; want error") - } - got := diags.Error() - want := "Module does not support depends_on" - if !strings.Contains(got, want) { - t.Fatalf("wrong error\ngot:\n%s\n\nwant: containing %q", got, want) - } -} diff --git a/configs/configload/loader_test.go b/configs/configload/loader_test.go index 54b6ed2f2..7b3483b4a 100644 --- a/configs/configload/loader_test.go +++ b/configs/configload/loader_test.go @@ -1,92 +1,12 @@ package configload import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" "testing" - "github.com/go-test/deep" "github.com/hashicorp/hcl/v2" "github.com/zclconf/go-cty/cty" ) -// tempChdir copies the contents of the given directory to a temporary -// directory and changes the test process's current working directory to -// point to that directory. Also returned is a function that should be -// called at the end of the test (e.g. via "defer") to restore the previous -// working directory. -// -// Tests using this helper cannot safely be run in parallel with other tests. 
-func tempChdir(t *testing.T, sourceDir string) (string, func()) { - t.Helper() - - tmpDir, err := ioutil.TempDir("", "terraform-configload") - if err != nil { - t.Fatalf("failed to create temporary directory: %s", err) - return "", nil - } - - if err := copyDir(tmpDir, sourceDir); err != nil { - t.Fatalf("failed to copy fixture to temporary directory: %s", err) - return "", nil - } - - oldDir, err := os.Getwd() - if err != nil { - t.Fatalf("failed to determine current working directory: %s", err) - return "", nil - } - - err = os.Chdir(tmpDir) - if err != nil { - t.Fatalf("failed to switch to temp dir %s: %s", tmpDir, err) - return "", nil - } - - t.Logf("tempChdir switched to %s after copying from %s", tmpDir, sourceDir) - - return tmpDir, func() { - err := os.Chdir(oldDir) - if err != nil { - panic(fmt.Errorf("failed to restore previous working directory %s: %s", oldDir, err)) - } - - if os.Getenv("TF_CONFIGLOAD_TEST_KEEP_TMP") == "" { - os.RemoveAll(tmpDir) - } - } -} - -// tempChdirLoader is a wrapper around tempChdir that also returns a Loader -// whose modules directory is at the conventional location within the -// created temporary directory. 
-func tempChdirLoader(t *testing.T, sourceDir string) (*Loader, func()) { - t.Helper() - - _, done := tempChdir(t, sourceDir) - modulesDir := filepath.Clean(".terraform/modules") - - err := os.MkdirAll(modulesDir, os.ModePerm) - if err != nil { - done() // undo the chdir in tempChdir so we can safely run other tests - t.Fatalf("failed to create modules directory: %s", err) - return nil, nil - } - - loader, err := NewLoader(&Config{ - ModulesDir: modulesDir, - }) - if err != nil { - done() // undo the chdir in tempChdir so we can safely run other tests - t.Fatalf("failed to create loader: %s", err) - return nil, nil - } - - return loader, done -} - func assertNoDiagnostics(t *testing.T, diags hcl.Diagnostics) bool { t.Helper() return assertDiagnosticCount(t, diags, 0) @@ -103,34 +23,6 @@ func assertDiagnosticCount(t *testing.T, diags hcl.Diagnostics, want int) bool { } return false } - -func assertDiagnosticSummary(t *testing.T, diags hcl.Diagnostics, want string) bool { - t.Helper() - - for _, diag := range diags { - if diag.Summary == want { - return false - } - } - - t.Errorf("missing diagnostic summary %q", want) - for _, diag := range diags { - t.Logf("- %s", diag) - } - return true -} - -func assertResultDeepEqual(t *testing.T, got, want interface{}) bool { - t.Helper() - if diff := deep.Equal(got, want); diff != nil { - for _, problem := range diff { - t.Errorf("%s", problem) - } - return true - } - return false -} - func assertResultCtyEqual(t *testing.T, got, want cty.Value) bool { t.Helper() if !got.RawEquals(want) { diff --git a/configs/configload/module_mgr.go b/configs/configload/module_mgr.go index 16871e310..cf930f537 100644 --- a/configs/configload/module_mgr.go +++ b/configs/configload/module_mgr.go @@ -60,17 +60,3 @@ func (m *moduleMgr) readModuleManifestSnapshot() error { m.manifest, err = modsdir.ReadManifestSnapshot(r) return err } - -// writeModuleManifestSnapshot writes a snapshot of the current manifest -// to the filesystem. 
-// -// The caller must guarantee no concurrent modifications of the manifest for -// the duration of a call to this function, or the behavior is undefined. -func (m *moduleMgr) writeModuleManifestSnapshot() error { - w, err := m.FS.Create(m.manifestSnapshotPath()) - if err != nil { - return err - } - - return m.manifest.WriteSnapshot(w) -} diff --git a/configs/configload/testdata/expand-modules/more-nested-provider/.terraform/modules/modules.json b/configs/configload/testdata/expand-modules/more-nested-provider/.terraform/modules/modules.json deleted file mode 100644 index 203e0876d..000000000 --- a/configs/configload/testdata/expand-modules/more-nested-provider/.terraform/modules/modules.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "Modules": [ - { - "Key": "", - "Source": "", - "Dir": "testdata/expand-modules/nested-provider" - }, - { - "Key": "child", - "Source": "./child", - "Dir": "testdata/expand-modules/nested-provider/child" - }, - { - "Key": "child2", - "Source": "./child2", - "Dir": "testdata/expand-modules/nested-provider/child2" - }, - { - "Key": "child3", - "Source": "./child3", - "Dir": "testdata/expand-modules/nested-provider/child3" - }, - { - "Key": "child.child2", - "Source": "../child2", - "Dir": "testdata/expand-modules/nested-provider/child2" - }, - { - "Key": "child.child2.child3", - "Source": "../child3", - "Dir": "testdata/expand-modules/nested-provider/child3" - } - ] -} diff --git a/configs/configload/testdata/expand-modules/more-nested-provider/child/main.tf b/configs/configload/testdata/expand-modules/more-nested-provider/child/main.tf deleted file mode 100644 index b4bbb38c1..000000000 --- a/configs/configload/testdata/expand-modules/more-nested-provider/child/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -module "child2" { - source = "../child2" - -} diff --git a/configs/configload/testdata/expand-modules/more-nested-provider/child2/main.tf b/configs/configload/testdata/expand-modules/more-nested-provider/child2/main.tf deleted file mode 100644 
index d107faad8..000000000 --- a/configs/configload/testdata/expand-modules/more-nested-provider/child2/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -module "child3" { - source = "../child3" - -} diff --git a/configs/configload/testdata/expand-modules/more-nested-provider/child3/main.tf b/configs/configload/testdata/expand-modules/more-nested-provider/child3/main.tf deleted file mode 100644 index 01cd85423..000000000 --- a/configs/configload/testdata/expand-modules/more-nested-provider/child3/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -provider "aws" { - -} - -output "my_output" { - value = "my output" -} \ No newline at end of file diff --git a/configs/configload/testdata/expand-modules/nested-provider/.terraform/modules/modules.json b/configs/configload/testdata/expand-modules/nested-provider/.terraform/modules/modules.json deleted file mode 100644 index 28f813039..000000000 --- a/configs/configload/testdata/expand-modules/nested-provider/.terraform/modules/modules.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "Modules": [ - { - "Key": "", - "Source": "", - "Dir": "testdata/expand-modules/nested-provider" - }, - { - "Key": "child", - "Source": "./child", - "Dir": "testdata/expand-modules/nested-provider/child" - }, - { - "Key": "child2", - "Source": "./child2", - "Dir": "testdata/expand-modules/nested-provider/child2" - }, - { - "Key": "child.child2", - "Source": "../child2", - "Dir": "testdata/expand-modules/nested-provider/child2" - } - ] -} diff --git a/configs/configload/testdata/expand-modules/nested-provider/child/main.tf b/configs/configload/testdata/expand-modules/nested-provider/child/main.tf deleted file mode 100644 index b4bbb38c1..000000000 --- a/configs/configload/testdata/expand-modules/nested-provider/child/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -module "child2" { - source = "../child2" - -} diff --git a/configs/configload/testdata/expand-modules/nested-provider/child2/main.tf b/configs/configload/testdata/expand-modules/nested-provider/child2/main.tf deleted file 
mode 100644 index 01cd85423..000000000 --- a/configs/configload/testdata/expand-modules/nested-provider/child2/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -provider "aws" { - -} - -output "my_output" { - value = "my output" -} \ No newline at end of file diff --git a/configs/configload/testdata/expand-modules/nested-provider/root.tf b/configs/configload/testdata/expand-modules/nested-provider/root.tf deleted file mode 100644 index 71b90f6d6..000000000 --- a/configs/configload/testdata/expand-modules/nested-provider/root.tf +++ /dev/null @@ -1,4 +0,0 @@ -module "child" { - count = 1 - source = "./child" -} diff --git a/configs/configload/testdata/expand-modules/no-provider-passed/.terraform/modules/modules.json b/configs/configload/testdata/expand-modules/no-provider-passed/.terraform/modules/modules.json deleted file mode 100644 index 8c0d92367..000000000 --- a/configs/configload/testdata/expand-modules/no-provider-passed/.terraform/modules/modules.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "Modules": [ - { - "Key": "", - "Source": "", - "Dir": "testdata/expand-modules/no-provider-passed" - }, - { - "Key": "child", - "Source": "./child", - "Dir": "testdata/expand-modules/no-provider-passed/child" - } - ] -} diff --git a/configs/configload/testdata/expand-modules/no-provider-passed/child/main.tf b/configs/configload/testdata/expand-modules/no-provider-passed/child/main.tf deleted file mode 100644 index a5c3c47b1..000000000 --- a/configs/configload/testdata/expand-modules/no-provider-passed/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -provider "aws" { -} - -output "my_output" { - value = "my output" -} - diff --git a/configs/configload/testdata/expand-modules/no-provider-passed/root.tf b/configs/configload/testdata/expand-modules/no-provider-passed/root.tf deleted file mode 100644 index 195cfeb5d..000000000 --- a/configs/configload/testdata/expand-modules/no-provider-passed/root.tf +++ /dev/null @@ -1,9 +0,0 @@ -provider "aws" { - alias = "usw2" - region = "us-west-2" -} 
-module "child" { - count = 1 - source = "./child" - # To make this test fail, add a valid providers {} block passing "aws" to the child -} diff --git a/configs/configload/testdata/expand-modules/provider-configured/.terraform/modules/modules.json b/configs/configload/testdata/expand-modules/provider-configured/.terraform/modules/modules.json deleted file mode 100644 index b7a474ea3..000000000 --- a/configs/configload/testdata/expand-modules/provider-configured/.terraform/modules/modules.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "Modules": [ - { - "Key": "", - "Source": "", - "Dir": "testdata/expand-modules/provider-configured" - }, - { - "Key": "child", - "Source": "./child", - "Dir": "testdata/expand-modules/provider-configured/child" - } - ] -} diff --git a/configs/configload/testdata/expand-modules/provider-configured/child/main.tf b/configs/configload/testdata/expand-modules/provider-configured/child/main.tf deleted file mode 100644 index 61ff4b572..000000000 --- a/configs/configload/testdata/expand-modules/provider-configured/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -provider "aws" { - region = "us-west-2" -} - -output "my_output" { - value = "my output" -} diff --git a/configs/configload/testdata/expand-modules/provider-configured/root.tf b/configs/configload/testdata/expand-modules/provider-configured/root.tf deleted file mode 100644 index 953d4ab55..000000000 --- a/configs/configload/testdata/expand-modules/provider-configured/root.tf +++ /dev/null @@ -1,11 +0,0 @@ -provider "aws" { - region = "us-west-2" -} - -module "child" { - count = 1 - source = "./child" - providers = { - aws = aws.w2 - } -} diff --git a/configs/configload/testdata/expand-modules/valid/.terraform/modules/modules.json b/configs/configload/testdata/expand-modules/valid/.terraform/modules/modules.json deleted file mode 100644 index 0bdb37d5b..000000000 --- a/configs/configload/testdata/expand-modules/valid/.terraform/modules/modules.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "Modules": 
[ - { - "Key": "", - "Source": "", - "Dir": "testdata/expand-modules/valid" - }, - { - "Key": "child", - "Source": "./child", - "Dir": "testdata/expand-modules/valid/child" - }, - { - "Key": "child_with_alias", - "Source": "./child-with-alias", - "Dir": "testdata/expand-modules/valid/child-with-alias" - } - ] -} diff --git a/configs/configload/testdata/expand-modules/valid/child-with-alias/main.tf b/configs/configload/testdata/expand-modules/valid/child-with-alias/main.tf deleted file mode 100644 index 3a59131cb..000000000 --- a/configs/configload/testdata/expand-modules/valid/child-with-alias/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -provider "aws" { - alias = "east" -} - -output "my_output" { - value = "my output" -} - diff --git a/configs/configload/testdata/expand-modules/valid/root.tf b/configs/configload/testdata/expand-modules/valid/root.tf deleted file mode 100644 index 27205a508..000000000 --- a/configs/configload/testdata/expand-modules/valid/root.tf +++ /dev/null @@ -1,20 +0,0 @@ -provider "aws" { - region = "us-east-1" - alias = "east" -} - -module "child" { - count = 1 - source = "./child" - providers = { - aws = aws.east - } -} - -module "child_with_alias" { - for_each = toset(["a", "b"]) - source = "./child-with-alias" - providers = { - aws.east = aws.east - } -} \ No newline at end of file diff --git a/configs/configschema/internal_validate.go b/configs/configschema/internal_validate.go index ebf1abbab..9114e0ab2 100644 --- a/configs/configschema/internal_validate.go +++ b/configs/configschema/internal_validate.go @@ -34,7 +34,7 @@ func (b *Block) internalValidate(prefix string, err error) error { if !validName.MatchString(name) { err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) } - if attrS.Optional == false && attrS.Required == false && attrS.Computed == false { + if !attrS.Optional && !attrS.Required && !attrS.Computed { err = multierror.Append(err, fmt.Errorf("%s%s: must 
set Optional, Required or Computed", prefix, name)) } if attrS.Optional && attrS.Required { diff --git a/configs/configschema/path.go b/configs/configschema/path.go new file mode 100644 index 000000000..4c48c1a04 --- /dev/null +++ b/configs/configschema/path.go @@ -0,0 +1,29 @@ +package configschema + +import ( + "github.com/zclconf/go-cty/cty" +) + +// AttributeByPath looks up the Attribute schema which corresponds to the given +// cty.Path. A nil value is returned if the given path does not correspond to a +// specific attribute. +// TODO: this will need to be updated for nested attributes +func (b *Block) AttributeByPath(path cty.Path) *Attribute { + block := b + for _, step := range path { + switch step := step.(type) { + case cty.GetAttrStep: + if attr := block.Attributes[step.Name]; attr != nil { + return attr + } + + if nestedBlock := block.BlockTypes[step.Name]; nestedBlock != nil { + block = &nestedBlock.Block + continue + } + + return nil + } + } + return nil +} diff --git a/configs/configschema/path_test.go b/configs/configschema/path_test.go new file mode 100644 index 000000000..c4f673bad --- /dev/null +++ b/configs/configschema/path_test.go @@ -0,0 +1,121 @@ +package configschema + +import ( + "testing" + + "github.com/zclconf/go-cty/cty" +) + +func TestAttributeByPath(t *testing.T) { + schema := &Block{ + Attributes: map[string]*Attribute{ + "a1": {Description: "a1"}, + "a2": {Description: "a2"}, + }, + BlockTypes: map[string]*NestedBlock{ + "b1": { + Nesting: NestingList, + Block: Block{ + Attributes: map[string]*Attribute{ + "a3": {Description: "a3"}, + "a4": {Description: "a4"}, + }, + BlockTypes: map[string]*NestedBlock{ + "b2": { + Nesting: NestingMap, + Block: Block{ + Attributes: map[string]*Attribute{ + "a5": {Description: "a5"}, + "a6": {Description: "a6"}, + }, + }, + }, + }, + }, + }, + "b3": { + Nesting: NestingMap, + Block: Block{ + Attributes: map[string]*Attribute{ + "a7": {Description: "a7"}, + "a8": {Description: "a8"}, + }, + 
BlockTypes: map[string]*NestedBlock{ + "b4": { + Nesting: NestingSet, + Block: Block{ + Attributes: map[string]*Attribute{ + "a9": {Description: "a9"}, + "a10": {Description: "a10"}, + }, + }, + }, + }, + }, + }, + }, + } + + for _, tc := range []struct { + path cty.Path + attrDescription string + exists bool + }{ + { + cty.GetAttrPath("a2"), + "a2", + true, + }, + { + cty.GetAttrPath("b1"), + "block", + false, + }, + { + cty.GetAttrPath("b1").IndexInt(1).GetAttr("a3"), + "a3", + true, + }, + { + cty.GetAttrPath("b1").IndexInt(1).GetAttr("b2").IndexString("foo").GetAttr("a7"), + "missing", + false, + }, + { + cty.GetAttrPath("b1").IndexInt(1).GetAttr("b2").IndexString("foo").GetAttr("a6"), + "a6", + true, + }, + { + cty.GetAttrPath("b3").IndexString("foo").GetAttr("b2").IndexString("foo").GetAttr("a7"), + "missing_block", + false, + }, + { + cty.GetAttrPath("b3").IndexString("foo").GetAttr("a7"), + "a7", + true, + }, + { + // Index steps don't apply to the schema, so the set Index value doesn't matter. + cty.GetAttrPath("b3").IndexString("foo").GetAttr("b4").Index(cty.EmptyObjectVal).GetAttr("a9"), + "a9", + true, + }, + } { + t.Run(tc.attrDescription, func(t *testing.T) { + attr := schema.AttributeByPath(tc.path) + if !tc.exists && attr == nil { + return + } + + if attr == nil { + t.Fatalf("missing attribute from path %#v\n", tc.path) + } + + if attr.Description != tc.attrDescription { + t.Fatalf("expected Attribute for %q, got %#v\n", tc.attrDescription, attr) + } + }) + } +} diff --git a/configs/module.go b/configs/module.go index 9ca4c2b76..126eebbdb 100644 --- a/configs/module.go +++ b/configs/module.go @@ -167,11 +167,9 @@ func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource { func (m *Module) appendFile(file *File) hcl.Diagnostics { var diags hcl.Diagnostics - for _, constraint := range file.CoreVersionConstraints { - // If there are any conflicting requirements then we'll catch them - // when we actually check these constraints. 
- m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) - } + // If there are any conflicting requirements then we'll catch them + // when we actually check these constraints. + m.CoreVersionConstraints = append(m.CoreVersionConstraints, file.CoreVersionConstraints...) m.ActiveExperiments = experiments.SetUnion(m.ActiveExperiments, file.ActiveExperiments) @@ -341,9 +339,7 @@ func (m *Module) mergeFile(file *File) hcl.Diagnostics { // would union together across multiple files anyway, but we'll // allow it and have each override file clobber any existing list. m.CoreVersionConstraints = nil - for _, constraint := range file.CoreVersionConstraints { - m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) - } + m.CoreVersionConstraints = append(m.CoreVersionConstraints, file.CoreVersionConstraints...) } if len(file.Backends) != 0 { diff --git a/configs/module_merge_body.go b/configs/module_merge_body.go index 7b51eae85..6ae64a2a9 100644 --- a/configs/module_merge_body.go +++ b/configs/module_merge_body.go @@ -112,9 +112,7 @@ func (b mergeBody) prepareContent(base *hcl.BodyContent, override *hcl.BodyConte } content.Blocks = append(content.Blocks, block) } - for _, block := range override.Blocks { - content.Blocks = append(content.Blocks, block) - } + content.Blocks = append(content.Blocks, override.Blocks...) return content } diff --git a/configs/parser_test.go b/configs/parser_test.go index a87ad68fd..cb2239282 100644 --- a/configs/parser_test.go +++ b/configs/parser_test.go @@ -38,16 +38,6 @@ func testParser(files map[string]string) *Parser { return NewParser(fs) } -// testModuleFromFile reads a single file, wraps it in a module, and returns -// it. This is a helper for use in unit tests. -func testModuleFromFile(filename string) (*Module, hcl.Diagnostics) { - parser := NewParser(nil) - f, diags := parser.LoadConfigFile(filename) - mod, modDiags := NewModule([]*File{f}, nil) - diags = append(diags, modDiags...) 
- return mod, modDiags -} - // testModuleConfigFrom File reads a single file from the given path as a // module and returns its configuration. This is a helper for use in unit tests. func testModuleConfigFromFile(filename string) (*Config, hcl.Diagnostics) { diff --git a/configs/provider_requirements.go b/configs/provider_requirements.go index ef746f3fa..f870e1cc9 100644 --- a/configs/provider_requirements.go +++ b/configs/provider_requirements.go @@ -1,6 +1,8 @@ package configs import ( + "fmt" + version "github.com/hashicorp/go-version" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/terraform/addrs" @@ -17,6 +19,7 @@ type RequiredProvider struct { Type addrs.Provider Requirement VersionConstraint DeclRange hcl.Range + Aliases []addrs.LocalProviderConfig } type RequiredProviders struct { @@ -26,118 +29,202 @@ type RequiredProviders struct { func decodeRequiredProvidersBlock(block *hcl.Block) (*RequiredProviders, hcl.Diagnostics) { attrs, diags := block.Body.JustAttributes() + if diags.HasErrors() { + return nil, diags + } + ret := &RequiredProviders{ RequiredProviders: make(map[string]*RequiredProvider), DeclRange: block.DefRange, } + for name, attr := range attrs { - expr, err := attr.Expr.Value(nil) - if err != nil { - diags = append(diags, err...) - } - - // verify that the local name is already localized or produce an error. - nameDiags := checkProviderNameNormalized(name, attr.Expr.Range()) - diags = append(diags, nameDiags...) - rp := &RequiredProvider{ Name: name, DeclRange: attr.Expr.Range(), } - switch { - case expr.Type().IsPrimitiveType(): + // Look for a single static string, in case we have the legacy version-only + // format in the configuration. + if expr, err := attr.Expr.Value(nil); err == nil && expr.Type().IsPrimitiveType() { vc, reqDiags := decodeVersionConstraint(attr) diags = append(diags, reqDiags...) 
- rp.Requirement = vc - case expr.Type().IsObjectType(): - if expr.Type().HasAttribute("version") { + pType, err := addrs.ParseProviderPart(rp.Name) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider name", + Detail: err.Error(), + Subject: attr.Expr.Range().Ptr(), + }) + continue + } + + rp.Requirement = vc + rp.Type = addrs.ImpliedProviderForUnqualifiedType(pType) + ret.RequiredProviders[name] = rp + + continue + } + + // verify that the local name is already localized or produce an error. + nameDiags := checkProviderNameNormalized(name, attr.Expr.Range()) + if nameDiags.HasErrors() { + diags = append(diags, nameDiags...) + continue + } + + kvs, mapDiags := hcl.ExprMap(attr.Expr) + if mapDiags.HasErrors() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid required_providers object", + Detail: "required_providers entries must be strings or objects.", + Subject: attr.Expr.Range().Ptr(), + }) + continue + } + + for _, kv := range kvs { + key, keyDiags := kv.Key.Value(nil) + if keyDiags.HasErrors() { + diags = append(diags, keyDiags...) 
+ continue + } + + if key.Type() != cty.String { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid Attribute", + Detail: fmt.Sprintf("Invalid attribute value for provider requirement: %#v", key), + Subject: kv.Key.Range().Ptr(), + }) + continue + } + + switch key.AsString() { + case "version": vc := VersionConstraint{ DeclRange: attr.Range, } - constraint := expr.GetAttr("version") - if !constraint.Type().Equals(cty.String) || constraint.IsNull() { + + constraint, valDiags := kv.Value.Value(nil) + if valDiags.HasErrors() || !constraint.Type().Equals(cty.String) { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Invalid version constraint", Detail: "Version must be specified as a string.", - Subject: attr.Expr.Range().Ptr(), + Subject: kv.Value.Range().Ptr(), }) - } else { - constraintStr := constraint.AsString() - constraints, err := version.NewConstraint(constraintStr) - if err != nil { - // NewConstraint doesn't return user-friendly errors, so we'll just - // ignore the provided error and produce our own generic one. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid version constraint", - Detail: "This string does not use correct version constraint syntax.", - Subject: attr.Expr.Range().Ptr(), - }) - } else { - vc.Required = constraints - rp.Requirement = vc - } + continue } - } - if expr.Type().HasAttribute("source") { - source := expr.GetAttr("source") - if !source.Type().Equals(cty.String) || source.IsNull() { + + constraintStr := constraint.AsString() + constraints, err := version.NewConstraint(constraintStr) + if err != nil { + // NewConstraint doesn't return user-friendly errors, so we'll just + // ignore the provided error and produce our own generic one. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid version constraint", + Detail: "This string does not use correct version constraint syntax.", + Subject: kv.Value.Range().Ptr(), + }) + continue + } + + vc.Required = constraints + rp.Requirement = vc + + case "source": + source, err := kv.Value.Value(nil) + if err != nil || !source.Type().Equals(cty.String) { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Invalid source", Detail: "Source must be specified as a string.", - Subject: attr.Expr.Range().Ptr(), + Subject: kv.Value.Range().Ptr(), }) - } else { - rp.Source = source.AsString() - - fqn, sourceDiags := addrs.ParseProviderSourceString(rp.Source) - - if sourceDiags.HasErrors() { - hclDiags := sourceDiags.ToHCL() - // The diagnostics from ParseProviderSourceString don't contain - // source location information because it has no context to compute - // them from, and so we'll add those in quickly here before we - // return. - for _, diag := range hclDiags { - if diag.Subject == nil { - diag.Subject = attr.Expr.Range().Ptr() - } - } - diags = append(diags, hclDiags...) - } else { - rp.Type = fqn - } - } - } - attrTypes := expr.Type().AttributeTypes() - for name := range attrTypes { - if name == "version" || name == "source" { continue } + + fqn, sourceDiags := addrs.ParseProviderSourceString(source.AsString()) + if sourceDiags.HasErrors() { + hclDiags := sourceDiags.ToHCL() + // The diagnostics from ParseProviderSourceString don't contain + // source location information because it has no context to compute + // them from, and so we'll add those in quickly here before we + // return. + for _, diag := range hclDiags { + if diag.Subject == nil { + diag.Subject = kv.Value.Range().Ptr() + } + } + diags = append(diags, hclDiags...) 
+ continue + } + + rp.Source = source.AsString() + rp.Type = fqn + + case "configuration_aliases": + exprs, listDiags := hcl.ExprList(kv.Value) + if listDiags.HasErrors() { + diags = append(diags, listDiags...) + continue + } + + for _, expr := range exprs { + traversal, travDiags := hcl.AbsTraversalForExpr(expr) + if travDiags.HasErrors() { + diags = append(diags, travDiags...) + continue + } + + addr, cfgDiags := ParseProviderConfigCompact(traversal) + if cfgDiags.HasErrors() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid configuration_aliases value", + Detail: `Configuration aliases can only contain references to local provider configuration names in the format of provider.alias`, + Subject: kv.Value.Range().Ptr(), + }) + continue + } + + if addr.LocalName != name { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid configuration_aliases value", + Detail: fmt.Sprintf(`Configuration aliases must be prefixed with the provider name. Expected %q, but found %q.`, name, addr.LocalName), + Subject: kv.Value.Range().Ptr(), + }) + continue + } + + rp.Aliases = append(rp.Aliases, addr) + } + + default: diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Invalid required_providers object", - Detail: `required_providers objects can only contain "version" and "source" attributes. To configure a provider, use a "provider" block.`, - Subject: attr.Expr.Range().Ptr(), + Detail: `required_providers objects can only contain "version", "source" and "configuration_aliases" attributes. 
To configure a provider, use a "provider" block.`, + Subject: kv.Key.Range().Ptr(), }) break } - default: - // should not happen - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid required_providers syntax", - Detail: "required_providers entries must be strings or objects.", - Subject: attr.Expr.Range().Ptr(), - }) } - if rp.Type.IsZero() && !diags.HasErrors() { // Don't try to generate an FQN if we've encountered errors + if diags.HasErrors() { + continue + } + + // We can add the required provider when there are no errors. + // If a source was not given, create an implied type. + if rp.Type.IsZero() { pType, err := addrs.ParseProviderPart(rp.Name) if err != nil { diags = append(diags, &hcl.Diagnostic{ diff --git a/configs/provider_requirements_test.go b/configs/provider_requirements_test.go index bf4d99882..69cac1b5b 100644 --- a/configs/provider_requirements_test.go +++ b/configs/provider_requirements_test.go @@ -185,15 +185,8 @@ func TestDecodeRequiredProvidersBlock(t *testing.T) { DefRange: blockRange, }, Want: &RequiredProviders{ - RequiredProviders: map[string]*RequiredProvider{ - "my-test": { - Name: "my-test", - Source: "some/invalid/provider/source/test", - Requirement: testVC("~>2.0.0"), - DeclRange: mockRange, - }, - }, - DeclRange: blockRange, + RequiredProviders: map[string]*RequiredProvider{}, + DeclRange: blockRange, }, Error: "Invalid provider source string", }, @@ -213,15 +206,8 @@ func TestDecodeRequiredProvidersBlock(t *testing.T) { DefRange: blockRange, }, Want: &RequiredProviders{ - RequiredProviders: map[string]*RequiredProvider{ - "my_test": { - Name: "my_test", - Type: addrs.Provider{}, - Requirement: testVC("~>2.0.0"), - DeclRange: mockRange, - }, - }, - DeclRange: blockRange, + RequiredProviders: map[string]*RequiredProvider{}, + DeclRange: blockRange, }, Error: "Invalid provider local name", }, @@ -241,15 +227,8 @@ func TestDecodeRequiredProvidersBlock(t *testing.T) { DefRange: blockRange, }, Want: 
&RequiredProviders{ - RequiredProviders: map[string]*RequiredProvider{ - "MYTEST": { - Name: "MYTEST", - Type: addrs.Provider{}, - Requirement: testVC("~>2.0.0"), - DeclRange: mockRange, - }, - }, - DeclRange: blockRange, + RequiredProviders: map[string]*RequiredProvider{}, + DeclRange: blockRange, }, Error: "Invalid provider local name", }, @@ -270,15 +249,8 @@ func TestDecodeRequiredProvidersBlock(t *testing.T) { DefRange: blockRange, }, Want: &RequiredProviders{ - RequiredProviders: map[string]*RequiredProvider{ - "my-test": { - Name: "my-test", - Source: "mycloud/test", - Type: addrs.NewProvider(addrs.DefaultRegistryHost, "mycloud", "test"), - DeclRange: mockRange, - }, - }, - DeclRange: blockRange, + RequiredProviders: map[string]*RequiredProvider{}, + DeclRange: blockRange, }, Error: "Invalid version constraint", }, @@ -296,15 +268,10 @@ func TestDecodeRequiredProvidersBlock(t *testing.T) { DefRange: blockRange, }, Want: &RequiredProviders{ - RequiredProviders: map[string]*RequiredProvider{ - "test": { - Name: "test", - DeclRange: mockRange, - }, - }, - DeclRange: blockRange, + RequiredProviders: map[string]*RequiredProvider{}, + DeclRange: blockRange, }, - Error: "Invalid required_providers syntax", + Error: "Invalid required_providers object", }, "invalid source attribute type": { Block: &hcl.Block{ @@ -322,13 +289,8 @@ func TestDecodeRequiredProvidersBlock(t *testing.T) { DefRange: blockRange, }, Want: &RequiredProviders{ - RequiredProviders: map[string]*RequiredProvider{ - "my-test": { - Name: "my-test", - DeclRange: mockRange, - }, - }, - DeclRange: blockRange, + RequiredProviders: map[string]*RequiredProvider{}, + DeclRange: blockRange, }, Error: "Invalid source", }, @@ -350,16 +312,8 @@ func TestDecodeRequiredProvidersBlock(t *testing.T) { DefRange: blockRange, }, Want: &RequiredProviders{ - RequiredProviders: map[string]*RequiredProvider{ - "my-test": { - Name: "my-test", - Source: "mycloud/test", - Type: addrs.NewProvider(addrs.DefaultRegistryHost, 
"mycloud", "test"), - Requirement: testVC("2.0.0"), - DeclRange: mockRange, - }, - }, - DeclRange: blockRange, + RequiredProviders: map[string]*RequiredProvider{}, + DeclRange: blockRange, }, Error: "Invalid required_providers object", }, @@ -370,7 +324,7 @@ func TestDecodeRequiredProvidersBlock(t *testing.T) { got, diags := decodeRequiredProvidersBlock(test.Block) if diags.HasErrors() { if test.Error == "" { - t.Fatalf("unexpected error") + t.Fatalf("unexpected error: %v", diags) } if gotErr := diags[0].Summary; gotErr != test.Error { t.Errorf("wrong error, got %q, want %q", gotErr, test.Error) diff --git a/configs/provider_validation.go b/configs/provider_validation.go new file mode 100644 index 000000000..57ad0fcdf --- /dev/null +++ b/configs/provider_validation.go @@ -0,0 +1,243 @@ +package configs + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform/addrs" +) + +// validateProviderConfigs walks the full configuration tree from the root +// module outward, static validation rules to the various combinations of +// provider configuration, required_providers values, and module call providers +// mappings. +// +// To retain compatibility with previous terraform versions, empty "proxy +// provider blocks" are still allowed within modules, though they will +// generate warnings when the configuration is loaded. The new validation +// however will generate an error if a suitable provider configuration is not +// passed in through the module call. +// +// The call argument is the ModuleCall for the provided Config cfg. The +// noProviderConfig argument is passed down the call stack, indicating that the +// module call, or a parent module call, has used a feature that precludes +// providers from being configured at all within the module. 
+func validateProviderConfigs(call *ModuleCall, cfg *Config, noProviderConfig bool) (diags hcl.Diagnostics) { + for name, child := range cfg.Children { + mc := cfg.Module.ModuleCalls[name] + + // if the module call has any of count, for_each or depends_on, + // providers are prohibited from being configured in this module, or + // any module beneath this module. + nope := noProviderConfig || mc.Count != nil || mc.ForEach != nil || mc.DependsOn != nil + diags = append(diags, validateProviderConfigs(mc, child, nope)...) + } + + // nothing else to do in the root module + if call == nil { + return diags + } + + // the set of provider configuration names passed into the module, with the + // source range of the provider assignment in the module call. + passedIn := map[string]PassedProviderConfig{} + + // the set of empty configurations that could be proxy configurations, with + // the source range of the empty configuration block. + emptyConfigs := map[string]*hcl.Range{} + + // the set of provider with a defined configuration, with the source range + // of the configuration block declaration. + configured := map[string]*hcl.Range{} + + // the set of configuration_aliases defined in the required_providers + // block, with the fully qualified provider type. + configAliases := map[string]addrs.AbsProviderConfig{} + + // the set of provider names defined in the required_providers block, and + // their provider types. + localNames := map[string]addrs.AbsProviderConfig{} + + for _, passed := range call.Providers { + name := providerName(passed.InChild.Name, passed.InChild.Alias) + passedIn[name] = passed + } + + mod := cfg.Module + + for _, pc := range mod.ProviderConfigs { + name := providerName(pc.Name, pc.Alias) + // Validate the config against an empty schema to see if it's empty. 
+ _, pcConfigDiags := pc.Config.Content(&hcl.BodySchema{}) + if pcConfigDiags.HasErrors() || pc.Version.Required != nil { + configured[name] = &pc.DeclRange + } else { + emptyConfigs[name] = &pc.DeclRange + } + } + + if mod.ProviderRequirements != nil { + for _, req := range mod.ProviderRequirements.RequiredProviders { + addr := addrs.AbsProviderConfig{ + Module: cfg.Path, + Provider: req.Type, + } + localNames[req.Name] = addr + for _, alias := range req.Aliases { + addr := addrs.AbsProviderConfig{ + Module: cfg.Path, + Provider: req.Type, + Alias: alias.Alias, + } + configAliases[providerName(alias.LocalName, alias.Alias)] = addr + } + } + } + + // there cannot be any configurations if no provider config is allowed + if len(configured) > 0 && noProviderConfig { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Module %s contains provider configuration", cfg.Path), + Detail: "Providers cannot be configured within modules using count, for_each or depends_on.", + }) + } + + // now check that the user is not attempting to override a config + for name := range configured { + if passed, ok := passedIn[name]; ok { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Cannot override provider configuration", + Detail: fmt.Sprintf("Provider %s is configured within the module %s and cannot be overridden.", name, cfg.Path), + Subject: &passed.InChild.NameRange, + }) + } + } + + // A declared alias requires either a matching configuration within the + // module, or one must be passed in. 
+ for name, providerAddr := range configAliases { + _, confOk := configured[name] + _, passedOk := passedIn[name] + + if confOk || passedOk { + continue + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("No configuration for provider %s", name), + Detail: fmt.Sprintf("Configuration required for %s.", providerAddr), + Subject: &call.DeclRange, + }) + } + + // You cannot pass in a provider that cannot be used + for name, passed := range passedIn { + providerAddr := addrs.AbsProviderConfig{ + Module: cfg.Path, + Provider: addrs.NewDefaultProvider(passed.InChild.Name), + Alias: passed.InChild.Alias, + } + + localAddr, localName := localNames[name] + if localName { + providerAddr = localAddr + } + + aliasAddr, configAlias := configAliases[name] + if configAlias { + providerAddr = aliasAddr + } + + _, emptyConfig := emptyConfigs[name] + + if !(localName || configAlias || emptyConfig) { + severity := hcl.DiagError + + // we still allow default configs, so switch to a warning if the incoming provider is a default + if providerAddr.Provider.IsDefault() { + severity = hcl.DiagWarning + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: severity, + Summary: fmt.Sprintf("Provider %s is undefined", name), + Detail: fmt.Sprintf("Module %s does not declare a provider named %s.\n", cfg.Path, name) + + fmt.Sprintf("If you wish to specify a provider configuration for the module, add an entry for %s in the required_providers block within the module.", name), + Subject: &passed.InChild.NameRange, + }) + } + + // The provider being passed in must also be of the correct type. + // While we would like to ensure required_providers exists here, + // implied default configuration is still allowed. 
+ pTy := addrs.NewDefaultProvider(passed.InParent.Name) + + // use the full address for a nice diagnostic output + parentAddr := addrs.AbsProviderConfig{ + Module: cfg.Parent.Path, + Provider: pTy, + Alias: passed.InParent.Alias, + } + + if cfg.Parent.Module.ProviderRequirements != nil { + req, defined := cfg.Parent.Module.ProviderRequirements.RequiredProviders[name] + if defined { + parentAddr.Provider = req.Type + } + } + + if !providerAddr.Provider.Equals(parentAddr.Provider) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Invalid type for provider %s", providerAddr), + Detail: fmt.Sprintf("Cannot use configuration from %s for %s. ", parentAddr, providerAddr) + + "The given provider configuration is for a different provider type.", + Subject: &passed.InChild.NameRange, + }) + } + } + + // Empty configurations are no longer needed + for name, src := range emptyConfigs { + detail := fmt.Sprintf("Remove the %s provider block from %s.", name, cfg.Path) + + isAlias := strings.Contains(name, ".") + _, isConfigAlias := configAliases[name] + _, isLocalName := localNames[name] + + if isAlias && !isConfigAlias { + localName := strings.Split(name, ".")[0] + detail = fmt.Sprintf("Remove the %s provider block from %s. 
Add %s to the list of configuration_aliases for %s in required_providers to define the provider configuration name.", name, cfg.Path, name, localName) + } + + if !isAlias && !isLocalName { + // if there is no local name, add a note to include it in the + // required_provider block + detail += fmt.Sprintf("\nTo ensure the correct provider configuration is used, add %s to the required_providers configuration", name) + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Empty provider configuration blocks are not required", + Detail: detail, + Subject: src, + }) + } + + if diags.HasErrors() { + return diags + } + + return diags +} + +func providerName(name, alias string) string { + if alias != "" { + name = name + "." + alias + } + return name +} diff --git a/configs/provisioner.go b/configs/provisioner.go index 5b664d39b..84deda3a5 100644 --- a/configs/provisioner.go +++ b/configs/provisioner.go @@ -34,11 +34,12 @@ func decodeProvisionerBlock(block *hcl.Block) (*Provisioner, hcl.Diagnostics) { switch pv.Type { case "chef", "habitat", "puppet", "salt-masterless": diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagWarning, - Summary: fmt.Sprintf("The \"%s\" provisioner is deprecated", pv.Type), - Detail: fmt.Sprintf("The \"%s\" provisioner is deprecated and will be removed from future versions of Terraform. Visit https://learn.hashicorp.com/collections/terraform/provision for alternatives to using provisioners that are a better fit for the Terraform workflow.", pv.Type), + Severity: hcl.DiagError, + Summary: fmt.Sprintf("The \"%s\" provisioner has been removed", pv.Type), + Detail: fmt.Sprintf("The \"%s\" provisioner was deprecated in Terraform 0.13.4 has been removed from Terraform. 
Visit https://learn.hashicorp.com/collections/terraform/provision for alternatives to using provisioners that are a better fit for the Terraform workflow.", pv.Type), Subject: &pv.TypeRange, }) + return nil, diags } if attr, exists := content.Attributes["when"]; exists { diff --git a/configs/resource.go b/configs/resource.go index e5cc8c606..a6946854d 100644 --- a/configs/resource.go +++ b/configs/resource.go @@ -66,8 +66,12 @@ func (r *Resource) Addr() addrs.Resource { // config addr if an explicit "provider" argument was not provided. func (r *Resource) ProviderConfigAddr() addrs.LocalProviderConfig { if r.ProviderConfigRef == nil { + // If no specific "provider" argument is given, we want to look up the + // provider config where the local name matches the implied provider + // from the resource type. This may be different from the resource's + // provider type. return addrs.LocalProviderConfig{ - LocalName: r.Provider.Type, + LocalName: r.Addr().ImpliedProvider(), } } diff --git a/configs/testdata/config-diagnostics/empty-configs/main.tf b/configs/testdata/config-diagnostics/empty-configs/main.tf new file mode 100644 index 000000000..c0edba275 --- /dev/null +++ b/configs/testdata/config-diagnostics/empty-configs/main.tf @@ -0,0 +1,20 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + } + baz = { + source = "hashicorp/baz" + } + } +} + +module "mod" { + source = "./mod" + providers = { + foo = foo + foo.bar = foo + baz = baz + baz.bing = baz + } +} diff --git a/configs/testdata/config-diagnostics/empty-configs/mod/main.tf b/configs/testdata/config-diagnostics/empty-configs/mod/main.tf new file mode 100644 index 000000000..50995ca0b --- /dev/null +++ b/configs/testdata/config-diagnostics/empty-configs/mod/main.tf @@ -0,0 +1,22 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + configuration_aliases = [ foo.bar ] + } + } +} + +provider "foo" { +} + +provider "foo" { + alias = "bar" +} + +provider "baz" { +} + 
+provider "baz" { + alias = "bing" +} diff --git a/configs/testdata/config-diagnostics/empty-configs/warnings b/configs/testdata/config-diagnostics/empty-configs/warnings new file mode 100644 index 000000000..dcf6736e9 --- /dev/null +++ b/configs/testdata/config-diagnostics/empty-configs/warnings @@ -0,0 +1,4 @@ +empty-configs/mod/main.tf:10,1-15: Empty provider configuration blocks are not required; Remove the foo provider block from module.mod +empty-configs/mod/main.tf:13,1-15: Empty provider configuration blocks are not required; Remove the foo.bar provider block from module.mod +empty-configs/mod/main.tf:17,1-15: Empty provider configuration blocks are not required; Remove the baz provider block from module.mod.\nTo ensure the correct provider configuration is used, add baz to the required_providers configuration +empty-configs/mod/main.tf:20,1-15: Empty provider configuration blocks are not required; Remove the baz.bing provider block from module.mod. Add baz.bing to the list of configuration_aliases for baz in required_providers to define the provider configuration name diff --git a/configs/testdata/config-diagnostics/incorrect-type/errors b/configs/testdata/config-diagnostics/incorrect-type/errors new file mode 100644 index 000000000..28b210850 --- /dev/null +++ b/configs/testdata/config-diagnostics/incorrect-type/errors @@ -0,0 +1 @@ +incorrect-type/main.tf:15,5-8: Invalid type for provider module.mod.provider["example.com/vendor/foo"]; Cannot use configuration from provider["registry.terraform.io/hashicorp/foo"] for module.mod.provider["example.com/vendor/foo"] diff --git a/configs/testdata/config-diagnostics/incorrect-type/main.tf b/configs/testdata/config-diagnostics/incorrect-type/main.tf new file mode 100644 index 000000000..074cc8422 --- /dev/null +++ b/configs/testdata/config-diagnostics/incorrect-type/main.tf @@ -0,0 +1,18 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + } + baz = { + source = "hashicorp/baz" + } + } +} 
+ +module "mod" { + source = "./mod" + providers = { + foo = foo + baz = baz + } +} diff --git a/configs/testdata/config-diagnostics/incorrect-type/mod/main.tf b/configs/testdata/config-diagnostics/incorrect-type/mod/main.tf new file mode 100644 index 000000000..14c3239e9 --- /dev/null +++ b/configs/testdata/config-diagnostics/incorrect-type/mod/main.tf @@ -0,0 +1,14 @@ +terraform { + required_providers { + foo = { + source = "example.com/vendor/foo" + } + } +} + +resource "foo_resource" "a" { +} + +// implied default provider baz +resource "baz_resource" "a" { +} diff --git a/configs/testdata/config-diagnostics/incorrect-type/warnings b/configs/testdata/config-diagnostics/incorrect-type/warnings new file mode 100644 index 000000000..a87f1f742 --- /dev/null +++ b/configs/testdata/config-diagnostics/incorrect-type/warnings @@ -0,0 +1 @@ +incorrect-type/main.tf:16,5-8: Provider baz is undefined; Module module.mod does not declare a provider named baz.\nIf you wish to specify a provider configuration for the module diff --git a/configs/configload/testdata/expand-modules/valid/child/main.tf b/configs/testdata/config-diagnostics/nested-provider/child/child2/main.tf similarity index 80% rename from configs/configload/testdata/expand-modules/valid/child/main.tf rename to configs/testdata/config-diagnostics/nested-provider/child/child2/main.tf index a5c3c47b1..f2695a661 100644 --- a/configs/configload/testdata/expand-modules/valid/child/main.tf +++ b/configs/testdata/config-diagnostics/nested-provider/child/child2/main.tf @@ -1,7 +1,7 @@ provider "aws" { + value = "foo" } output "my_output" { value = "my output" } - diff --git a/configs/testdata/config-diagnostics/nested-provider/child/main.tf b/configs/testdata/config-diagnostics/nested-provider/child/main.tf new file mode 100644 index 000000000..9a725a520 --- /dev/null +++ b/configs/testdata/config-diagnostics/nested-provider/child/main.tf @@ -0,0 +1,4 @@ +module "child2" { + // the test fixture treats these sources as 
relative to the root + source = "./child/child2" +} diff --git a/configs/testdata/config-diagnostics/nested-provider/errors b/configs/testdata/config-diagnostics/nested-provider/errors new file mode 100644 index 000000000..8f44cac78 --- /dev/null +++ b/configs/testdata/config-diagnostics/nested-provider/errors @@ -0,0 +1,3 @@ +Module module.child.module.child2 contains provider configuration; Providers cannot be configured within modules using count, for_each or depends_on + + diff --git a/configs/configload/testdata/expand-modules/more-nested-provider/root.tf b/configs/testdata/config-diagnostics/nested-provider/root.tf similarity index 100% rename from configs/configload/testdata/expand-modules/more-nested-provider/root.tf rename to configs/testdata/config-diagnostics/nested-provider/root.tf diff --git a/configs/testdata/config-diagnostics/override-provider/errors b/configs/testdata/config-diagnostics/override-provider/errors new file mode 100644 index 000000000..a8d59d6e5 --- /dev/null +++ b/configs/testdata/config-diagnostics/override-provider/errors @@ -0,0 +1 @@ +override-provider/main.tf:17,5-8: Cannot override provider configuration; Provider bar is configured within the module module.mod and cannot be overridden. 
diff --git a/configs/testdata/config-diagnostics/override-provider/main.tf b/configs/testdata/config-diagnostics/override-provider/main.tf new file mode 100644 index 000000000..30feec1c9 --- /dev/null +++ b/configs/testdata/config-diagnostics/override-provider/main.tf @@ -0,0 +1,19 @@ +terraform { + required_providers { + bar = { + version = "~>1.0.0" + } + } +} + +provider "bar" { + value = "not ok" +} + +// this module configures its own provider, which cannot be overridden +module "mod" { + source = "./mod" + providers = { + bar = bar + } +} diff --git a/configs/testdata/config-diagnostics/override-provider/mod/main.tf b/configs/testdata/config-diagnostics/override-provider/mod/main.tf new file mode 100644 index 000000000..c0b616971 --- /dev/null +++ b/configs/testdata/config-diagnostics/override-provider/mod/main.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + bar = { + version = "~>1.0.0" + } + } +} + +// this configuration cannot be overridden from an outside module +provider "bar" { + value = "ok" +} diff --git a/configs/testdata/config-diagnostics/required-alias/errors b/configs/testdata/config-diagnostics/required-alias/errors new file mode 100644 index 000000000..a1b944a43 --- /dev/null +++ b/configs/testdata/config-diagnostics/required-alias/errors @@ -0,0 +1 @@ +required-alias/main.tf:1,1-13: No configuration for provider foo.bar; Configuration required for module.mod.provider["registry.terraform.io/hashicorp/foo"].bar diff --git a/configs/testdata/config-diagnostics/required-alias/main.tf b/configs/testdata/config-diagnostics/required-alias/main.tf new file mode 100644 index 000000000..c2cfe60b8 --- /dev/null +++ b/configs/testdata/config-diagnostics/required-alias/main.tf @@ -0,0 +1,4 @@ +module "mod" { + source = "./mod" + // missing providers with foo.bar provider config +} diff --git a/configs/testdata/config-diagnostics/required-alias/mod/main.tf b/configs/testdata/config-diagnostics/required-alias/mod/main.tf new file mode 100644 index 
000000000..0f2a52168 --- /dev/null +++ b/configs/testdata/config-diagnostics/required-alias/mod/main.tf @@ -0,0 +1,13 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + version = "1.0.0" + configuration_aliases = [ foo.bar ] + } + } +} + +resource "foo_resource" "a" { + provider = foo.bar +} diff --git a/configs/testdata/config-diagnostics/unexpected-provider/main.tf b/configs/testdata/config-diagnostics/unexpected-provider/main.tf new file mode 100644 index 000000000..cd859a726 --- /dev/null +++ b/configs/testdata/config-diagnostics/unexpected-provider/main.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + version = "1.0.0" + } + } +} + +module "mod" { + source = "./mod" + providers = { + foo = foo + } +} diff --git a/configs/testdata/config-diagnostics/unexpected-provider/mod/main.tf b/configs/testdata/config-diagnostics/unexpected-provider/mod/main.tf new file mode 100644 index 000000000..f69bfa813 --- /dev/null +++ b/configs/testdata/config-diagnostics/unexpected-provider/mod/main.tf @@ -0,0 +1,2 @@ +resource "foo_resource" "a" { +} diff --git a/configs/testdata/config-diagnostics/unexpected-provider/warnings b/configs/testdata/config-diagnostics/unexpected-provider/warnings new file mode 100644 index 000000000..0e41b39a9 --- /dev/null +++ b/configs/testdata/config-diagnostics/unexpected-provider/warnings @@ -0,0 +1,2 @@ +unexpected-provider/main.tf:13,5-8: Provider foo is undefined; Module module.mod does not declare a provider named foo. 
+ diff --git a/configs/testdata/config-diagnostics/with-depends-on/main.tf b/configs/testdata/config-diagnostics/with-depends-on/main.tf new file mode 100644 index 000000000..49c2dcd6e --- /dev/null +++ b/configs/testdata/config-diagnostics/with-depends-on/main.tf @@ -0,0 +1,14 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + } + } +} + +module "mod2" { + source = "./mod1" + providers = { + foo = foo + } +} diff --git a/configs/testdata/config-diagnostics/with-depends-on/mod1/main.tf b/configs/testdata/config-diagnostics/with-depends-on/mod1/main.tf new file mode 100644 index 000000000..c318484b5 --- /dev/null +++ b/configs/testdata/config-diagnostics/with-depends-on/mod1/main.tf @@ -0,0 +1,19 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + } + } +} + +resource "foo_resource" "a" { +} + +module "mod2" { + depends_on = [foo_resource.a] + // test fixture source is from root + source = "./mod1/mod2" + providers = { + foo = foo + } +} diff --git a/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/main.tf b/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/main.tf new file mode 100644 index 000000000..eaa3550bd --- /dev/null +++ b/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/main.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + } + } +} + +module "mod3" { + // test fixture source is from root + source = "./mod1/mod2/mod3" + providers = { + foo.bar = foo + } +} diff --git a/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/mod3/main.tf b/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/mod3/main.tf new file mode 100644 index 000000000..b1827126d --- /dev/null +++ b/configs/testdata/config-diagnostics/with-depends-on/mod1/mod2/mod3/main.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + foo = { + source = "hashicorp/foo" + configuration_aliases = [ foo.bar ] + } + } +} + +resource "foo_resource" 
"a" { + providers = foo.bar +} diff --git a/configs/testdata/error-files/provider-source-prefix.tf b/configs/testdata/error-files/provider-source-prefix.tf index 99cd76df7..96811699f 100644 --- a/configs/testdata/error-files/provider-source-prefix.tf +++ b/configs/testdata/error-files/provider-source-prefix.tf @@ -1,10 +1,10 @@ terraform { required_providers { - usererror = { # ERROR: Invalid provider type - source = "foo/terraform-provider-foo" + usererror = { + source = "foo/terraform-provider-foo" # ERROR: Invalid provider type } - badname = { # ERROR: Invalid provider type - source = "foo/terraform-foo" + badname = { + source = "foo/terraform-foo" # ERROR: Invalid provider type } } } diff --git a/configs/testdata/error-files/vendor_provisioners.tf b/configs/testdata/error-files/vendor_provisioners.tf new file mode 100644 index 000000000..4d2ec7892 --- /dev/null +++ b/configs/testdata/error-files/vendor_provisioners.tf @@ -0,0 +1,3 @@ +resource "null_resource" "test" { + provisioner "habitat" {} # ERROR: The "habitat" provisioner has been removed +} diff --git a/configs/testdata/nested-backend-warning/child/child.tf b/configs/testdata/nested-backend-warning/child/child.tf new file mode 100644 index 000000000..5a6948e85 --- /dev/null +++ b/configs/testdata/nested-backend-warning/child/child.tf @@ -0,0 +1,6 @@ +terraform { + # Only the root module can declare a backend. Terraform should emit a warning + # about this child module backend declaration. 
+ backend "ignored" { + } +} diff --git a/terraform/testdata/empty-with-child-module/root.tf b/configs/testdata/nested-backend-warning/root.tf similarity index 100% rename from terraform/testdata/empty-with-child-module/root.tf rename to configs/testdata/nested-backend-warning/root.tf diff --git a/configs/testdata/valid-modules/provider-aliases/main.tf b/configs/testdata/valid-modules/provider-aliases/main.tf new file mode 100644 index 000000000..dd9fb084d --- /dev/null +++ b/configs/testdata/valid-modules/provider-aliases/main.tf @@ -0,0 +1,17 @@ +terraform { + required_providers { + foo-test = { + source = "foo/test" + configuration_aliases = [foo-test.a, foo-test.b] + } + } +} + +resource "test_instance" "explicit" { + provider = foo-test.a +} + +data "test_resource" "explicit" { + provider = foo-test.b +} + diff --git a/configs/testdata/warning-files/vendor_provisioners.tf b/configs/testdata/warning-files/vendor_provisioners.tf deleted file mode 100644 index 7da591df6..000000000 --- a/configs/testdata/warning-files/vendor_provisioners.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "null_resource" "test" { - provisioner "habitat" {} # WARNING: The "habitat" provisioner is deprecated -} diff --git a/contrib/fish-completion/README.md b/contrib/fish-completion/README.md deleted file mode 100644 index a50ed1e81..000000000 --- a/contrib/fish-completion/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Terraform fish shell completion - -Copy the completions to your local fish configuration: - -``` -mkdir -p ~/.config/fish/completions -cp terraform.fish ~/.config/fish/completions -``` - -Please note that these completions have been merged upstream and should be bundled with fish 2.6 or later. 
diff --git a/contrib/fish-completion/terraform.fish b/contrib/fish-completion/terraform.fish deleted file mode 100644 index 0c5646230..000000000 --- a/contrib/fish-completion/terraform.fish +++ /dev/null @@ -1,170 +0,0 @@ -# general options -complete -f -c terraform -l version -d 'Print version information' -complete -f -c terraform -l help -d 'Show help' - -### apply -complete -f -c terraform -n '__fish_use_subcommand' -a apply -d 'Build or change infrastructure' -complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o backup -d 'Path to backup the existing state file' -complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o lock -d 'Lock the state file when locking is supported' -complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o lock-timeout -d 'Duration to retry a state lock' -complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o input -d 'Ask for input for variables if not directly set' -complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o no-color -d 'If specified, output won\'t contain any color' -complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o parallelism -d 'Limit the number of concurrent operations' -complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o refresh -d 'Update state prior to checking for differences' -complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o state -d 'Path to a Terraform state file' -complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o state-out -d 'Path to write state' -complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o target -d 'Resource to target' -complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o var -d 'Set a variable in the Terraform configuration' -complete -f -c terraform -n '__fish_seen_subcommand_from apply' -o var-file -d 'Set variables from a file' - -### console -complete -f -c terraform -n '__fish_use_subcommand' -a console -d 'Interactive console for Terraform 
interpolations' -complete -f -c terraform -n '__fish_seen_subcommand_from console' -o state -d 'Path to a Terraform state file' -complete -f -c terraform -n '__fish_seen_subcommand_from console' -o var -d 'Set a variable in the Terraform configuration' -complete -f -c terraform -n '__fish_seen_subcommand_from console' -o var-file -d 'Set variables from a file' - -### destroy -complete -f -c terraform -n '__fish_use_subcommand' -a destroy -d 'Destroy Terraform-managed infrastructure' -complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o backup -d 'Path to backup the existing state file' -complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o force -d 'Don\'t ask for input for destroy confirmation' -complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o lock -d 'Lock the state file when locking is supported' -complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o lock-timeout -d 'Duration to retry a state lock' -complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o no-color -d 'If specified, output won\'t contain any color' -complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o parallelism -d 'Limit the number of concurrent operations' -complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o refresh -d 'Update state prior to checking for differences' -complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o state -d 'Path to a Terraform state file' -complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o state-out -d 'Path to write state' -complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o target -d 'Resource to target' -complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o var -d 'Set a variable in the Terraform configuration' -complete -f -c terraform -n '__fish_seen_subcommand_from destroy' -o var-file -d 'Set variables from a file' - -### env -complete -f -c terraform -n '__fish_use_subcommand' -a env -d 
'Environment management' -complete -f -c terraform -n '__fish_seen_subcommand_from env' -a list -d 'List environments' -complete -f -c terraform -n '__fish_seen_subcommand_from env' -a select -d 'Select an environment' -complete -f -c terraform -n '__fish_seen_subcommand_from env' -a new -d 'Create a new environment' -complete -f -c terraform -n '__fish_seen_subcommand_from env' -a delete -d 'Delete an existing environment' - -### fmt -complete -f -c terraform -n '__fish_use_subcommand' -a fmt -d 'Rewrite config files to canonical format' -complete -f -c terraform -n '__fish_seen_subcommand_from fmt' -o list -d 'List files whose formatting differs' -complete -f -c terraform -n '__fish_seen_subcommand_from fmt' -o write -d 'Write result to source file' -complete -f -c terraform -n '__fish_seen_subcommand_from fmt' -o diff -d 'Display diffs of formatting changes' - -### get -complete -f -c terraform -n '__fish_use_subcommand' -a get -d 'Download and install modules for the configuration' -complete -f -c terraform -n '__fish_seen_subcommand_from get' -o update -d 'Check modules for updates' -complete -f -c terraform -n '__fish_seen_subcommand_from get' -o no-color -d 'If specified, output won\'t contain any color' - -### graph -complete -f -c terraform -n '__fish_use_subcommand' -a graph -d 'Create a visual graph of Terraform resources' -complete -f -c terraform -n '__fish_seen_subcommand_from graph' -o draw-cycles -d 'Highlight any cycles in the graph' -complete -f -c terraform -n '__fish_seen_subcommand_from graph' -o module-depth -d 'Depth of modules to show in the output' -complete -f -c terraform -n '__fish_seen_subcommand_from graph' -o no-color -d 'If specified, output won\'t contain any color' -complete -f -c terraform -n '__fish_seen_subcommand_from graph' -o type -d 'Type of graph to output' - -### import -complete -f -c terraform -n '__fish_use_subcommand' -a import -d 'Import existing infrastructure into Terraform' -complete -f -c terraform -n 
'__fish_seen_subcommand_from import' -o backup -d 'Path to backup the existing state file' -complete -f -c terraform -n '__fish_seen_subcommand_from import' -o config -d 'Path to a directory of configuration files' -complete -f -c terraform -n '__fish_seen_subcommand_from import' -o input -d 'Ask for input for variables if not directly set' -complete -f -c terraform -n '__fish_seen_subcommand_from import' -o lock -d 'Lock the state file when locking is supported' -complete -f -c terraform -n '__fish_seen_subcommand_from import' -o lock-timeout -d 'Duration to retry a state lock' -complete -f -c terraform -n '__fish_seen_subcommand_from import' -o no-color -d 'If specified, output won\'t contain any color' -complete -f -c terraform -n '__fish_seen_subcommand_from import' -o provider -d 'Specific provider to use for import' -complete -f -c terraform -n '__fish_seen_subcommand_from import' -o state -d 'Path to a Terraform state file' -complete -f -c terraform -n '__fish_seen_subcommand_from import' -o state-out -d 'Path to write state' -complete -f -c terraform -n '__fish_seen_subcommand_from import' -o var -d 'Set a variable in the Terraform configuration' -complete -f -c terraform -n '__fish_seen_subcommand_from import' -o var-file -d 'Set variables from a file' - -### init -complete -f -c terraform -n '__fish_use_subcommand' -a init -d 'Initialize a new or existing Terraform configuration' -complete -f -c terraform -n '__fish_seen_subcommand_from init' -o backend -d 'Configure the backend for this environment' -complete -f -c terraform -n '__fish_seen_subcommand_from init' -o backend-config -d 'Backend configuration' -complete -f -c terraform -n '__fish_seen_subcommand_from init' -o get -d 'Download modules for this configuration' -complete -f -c terraform -n '__fish_seen_subcommand_from init' -o input -d 'Ask for input if necessary' -complete -f -c terraform -n '__fish_seen_subcommand_from init' -o lock -d 'Lock the state file when locking is supported' -complete 
-f -c terraform -n '__fish_seen_subcommand_from init' -o lock-timeout -d 'Duration to retry a state lock' -complete -f -c terraform -n '__fish_seen_subcommand_from init' -o no-color -d 'If specified, output won\'t contain any color' -complete -f -c terraform -n '__fish_seen_subcommand_from init' -o force-copy -d 'Suppress prompts about copying state data' - -### output -complete -f -c terraform -n '__fish_use_subcommand' -a output -d 'Read an output from a state file' -complete -f -c terraform -n '__fish_seen_subcommand_from output' -o state -d 'Path to the state file to read' -complete -f -c terraform -n '__fish_seen_subcommand_from output' -o no-color -d 'If specified, output won\'t contain any color' -complete -f -c terraform -n '__fish_seen_subcommand_from output' -o module -d 'Return the outputs for a specific module' -complete -f -c terraform -n '__fish_seen_subcommand_from output' -o json -d 'Print output in JSON format' - -### plan -complete -f -c terraform -n '__fish_use_subcommand' -a plan -d 'Generate and show an execution plan' -complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o destroy -d 'Generate a plan to destroy all resources' -complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o detailed-exitcode -d 'Return detailed exit codes' -complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o input -d 'Ask for input for variables if not directly set' -complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o lock -d 'Lock the state file when locking is supported' -complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o lock-timeout -d 'Duration to retry a state lock' -complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o no-color -d 'If specified, output won\'t contain any color' -complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o out -d 'Write a plan file to the given path' -complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o parallelism -d 'Limit the number 
of concurrent operations' -complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o refresh -d 'Update state prior to checking for differences' -complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o state -d 'Path to a Terraform state file' -complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o target -d 'Resource to target' -complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o var -d 'Set a variable in the Terraform configuration' -complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o var-file -d 'Set variables from a file' - -### push -complete -f -c terraform -n '__fish_use_subcommand' -a push -d 'Upload this Terraform module to Atlas to run' -complete -f -c terraform -n '__fish_seen_subcommand_from push' -o atlas-address -d 'An alternate address to an Atlas instance' -complete -f -c terraform -n '__fish_seen_subcommand_from push' -o upload-modules -d 'Lock modules and upload completely' -complete -f -c terraform -n '__fish_seen_subcommand_from push' -o name -d 'Name of the configuration in Atlas' -complete -f -c terraform -n '__fish_seen_subcommand_from push' -o token -d 'Access token to use to upload' -complete -f -c terraform -n '__fish_seen_subcommand_from push' -o overwrite -d 'Variable keys that should overwrite values in Atlas' -complete -f -c terraform -n '__fish_seen_subcommand_from push' -o var -d 'Set a variable in the Terraform configuration' -complete -f -c terraform -n '__fish_seen_subcommand_from push' -o var-file -d 'Set variables from a file' -complete -f -c terraform -n '__fish_seen_subcommand_from push' -o vcs -d 'Upload only files committed to your VCS' -complete -f -c terraform -n '__fish_seen_subcommand_from push' -o no-color -d 'If specified, output won\'t contain any color' - -### refresh -complete -f -c terraform -n '__fish_use_subcommand' -a refresh -d 'Update local state file against real resources' -complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o backup -d 
'Path to backup the existing state file' -complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o input -d 'Ask for input for variables if not directly set' -complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o lock -d 'Lock the state file when locking is supported' -complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o lock-timeout -d 'Duration to retry a state lock' -complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o no-color -d 'If specified, output won\'t contain any color' -complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o state -d 'Path to a Terraform state file' -complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o state-out -d 'Path to write state' -complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o target -d 'Resource to target' -complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o var -d 'Set a variable in the Terraform configuration' -complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o var-file -d 'Set variables from a file' - -### show -complete -f -c terraform -n '__fish_use_subcommand' -a show -d 'Inspect Terraform state or plan' -complete -f -c terraform -n '__fish_seen_subcommand_from show' -o no-color -d 'If specified, output won\'t contain any color' - -### taint -complete -f -c terraform -n '__fish_use_subcommand' -a taint -d 'Manually mark a resource for recreation' -complete -f -c terraform -n '__fish_seen_subcommand_from taint' -o allow-missing -d 'Succeed even if resource is missing' -complete -f -c terraform -n '__fish_seen_subcommand_from taint' -o backup -d 'Path to backup the existing state file' -complete -f -c terraform -n '__fish_seen_subcommand_from taint' -o lock -d 'Lock the state file when locking is supported' -complete -f -c terraform -n '__fish_seen_subcommand_from taint' -o lock-timeout -d 'Duration to retry a state lock' -complete -f -c terraform -n '__fish_seen_subcommand_from 
taint' -o module -d 'The module path where the resource lives' -complete -f -c terraform -n '__fish_seen_subcommand_from taint' -o no-color -d 'If specified, output won\'t contain any color' -complete -f -c terraform -n '__fish_seen_subcommand_from taint' -o state -d 'Path to a Terraform state file' -complete -f -c terraform -n '__fish_seen_subcommand_from taint' -o state-out -d 'Path to write state' - -### untaint -complete -f -c terraform -n '__fish_use_subcommand' -a untaint -d 'Manually unmark a resource as tainted' -complete -f -c terraform -n '__fish_seen_subcommand_from untaint' -o allow-missing -d 'Succeed even if resource is missing' -complete -f -c terraform -n '__fish_seen_subcommand_from untaint' -o backup -d 'Path to backup the existing state file' -complete -f -c terraform -n '__fish_seen_subcommand_from untaint' -o lock -d 'Lock the state file when locking is supported' -complete -f -c terraform -n '__fish_seen_subcommand_from untaint' -o lock-timeout -d 'Duration to retry a state lock' -complete -f -c terraform -n '__fish_seen_subcommand_from untaint' -o module -d 'The module path where the resource lives' -complete -f -c terraform -n '__fish_seen_subcommand_from untaint' -o no-color -d 'If specified, output won\'t contain any color' -complete -f -c terraform -n '__fish_seen_subcommand_from untaint' -o state -d 'Path to a Terraform state file' -complete -f -c terraform -n '__fish_seen_subcommand_from untaint' -o state-out -d 'Path to write state' - -### validate -complete -f -c terraform -n '__fish_use_subcommand' -a validate -d 'Validate the Terraform files' -complete -f -c terraform -n '__fish_seen_subcommand_from validate' -o no-color -d 'If specified, output won\'t contain any color' - -### version -complete -f -c terraform -n '__fish_use_subcommand' -a version -d 'Print the Terraform version' diff --git a/contrib/zsh-completion/README.md b/contrib/zsh-completion/README.md deleted file mode 100644 index 0f8e2e811..000000000 --- 
a/contrib/zsh-completion/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Terraform zsh completion - -## Install -```console -% terraform -install-autocomplete -``` - -## Uninstall -```console -% terraform -uninstall-autocomplete -``` diff --git a/dag/dag_test.go b/dag/dag_test.go index ae2c2387e..90a9f75c6 100644 --- a/dag/dag_test.go +++ b/dag/dag_test.go @@ -340,7 +340,7 @@ func BenchmarkDAG(b *testing.B) { // layer B for i := 0; i < count; i++ { B := fmt.Sprintf("B%d", i) - g.Add(fmt.Sprintf(B)) + g.Add(B) for j := 0; j < count; j++ { g.Connect(BasicEdge(B, fmt.Sprintf("A%d", j))) } @@ -349,7 +349,7 @@ func BenchmarkDAG(b *testing.B) { // layer C for i := 0; i < count; i++ { c := fmt.Sprintf("C%d", i) - g.Add(fmt.Sprintf(c)) + g.Add(c) for j := 0; j < count; j++ { // connect them to previous layers so we have something that requires reduction g.Connect(BasicEdge(c, fmt.Sprintf("A%d", j))) @@ -360,7 +360,7 @@ func BenchmarkDAG(b *testing.B) { // layer D for i := 0; i < count; i++ { d := fmt.Sprintf("D%d", i) - g.Add(fmt.Sprintf(d)) + g.Add(d) for j := 0; j < count; j++ { g.Connect(BasicEdge(d, fmt.Sprintf("A%d", j))) g.Connect(BasicEdge(d, fmt.Sprintf("B%d", j))) diff --git a/dag/graph.go b/dag/graph.go index 1d0544354..222ac0786 100644 --- a/dag/graph.go +++ b/dag/graph.go @@ -337,7 +337,7 @@ func VertexName(raw Vertex) string { case NamedVertex: return v.Name() case fmt.Stringer: - return fmt.Sprintf("%s", v) + return v.String() default: return fmt.Sprintf("%v", v) } diff --git a/dag/marshal.go b/dag/marshal.go index 0ad45e8cb..0ba52152f 100644 --- a/dag/marshal.go +++ b/dag/marshal.go @@ -7,18 +7,6 @@ import ( "strconv" ) -const ( - typeOperation = "Operation" - typeTransform = "Transform" - typeWalk = "Walk" - typeDepthFirstWalk = "DepthFirstWalk" - typeReverseDepthFirstWalk = "ReverseDepthFirstWalk" - typeTransitiveReduction = "TransitiveReduction" - typeEdgeInfo = "EdgeInfo" - typeVertexInfo = "VertexInfo" - typeVisitInfo = "VisitInfo" -) - // the marshal* 
structs are for serialization of the graph data. type marshalGraph struct { // Type is always "Graph", for identification as a top level object in the @@ -49,36 +37,6 @@ type marshalGraph struct { Cycles [][]*marshalVertex `json:",omitempty"` } -// The add, remove, connect, removeEdge methods mirror the basic Graph -// manipulations to reconstruct a marshalGraph from a debug log. -func (g *marshalGraph) add(v *marshalVertex) { - g.Vertices = append(g.Vertices, v) - sort.Sort(vertices(g.Vertices)) -} - -func (g *marshalGraph) remove(v *marshalVertex) { - for i, existing := range g.Vertices { - if v.ID == existing.ID { - g.Vertices = append(g.Vertices[:i], g.Vertices[i+1:]...) - return - } - } -} - -func (g *marshalGraph) connect(e *marshalEdge) { - g.Edges = append(g.Edges, e) - sort.Sort(edges(g.Edges)) -} - -func (g *marshalGraph) removeEdge(e *marshalEdge) { - for i, existing := range g.Edges { - if e.Source == existing.Source && e.Target == existing.Target { - g.Edges = append(g.Edges[:i], g.Edges[i+1:]...) - return - } - } -} - func (g *marshalGraph) vertexByID(id string) *marshalVertex { for _, v := range g.Vertices { if id == v.ID { diff --git a/dag/set.go b/dag/set.go index c5c1af120..fc16e801b 100644 --- a/dag/set.go +++ b/dag/set.go @@ -56,15 +56,13 @@ func (s Set) Intersection(other Set) Set { // other doesn't. func (s Set) Difference(other Set) Set { result := make(Set) - if s != nil { - for k, v := range s { - var ok bool - if other != nil { - _, ok = other[k] - } - if !ok { - result.Add(v) - } + for k, v := range s { + var ok bool + if other != nil { + _, ok = other[k] + } + if !ok { + result.Add(v) } } diff --git a/dag/walk.go b/dag/walk.go index f9fdf2dfc..26b249230 100644 --- a/dag/walk.go +++ b/dag/walk.go @@ -106,11 +106,6 @@ type walkerVertex struct { depsCancelCh chan struct{} } -// errWalkUpstream is used in the errMap of a walk to note that an upstream -// dependency failed so this vertex wasn't run. 
This is not shown in the final -// user-returned error. -var errWalkUpstream = errors.New("upstream dependency failed") - // Wait waits for the completion of the walk and returns diagnostics describing // any problems that arose. Update should be called to populate the walk with // vertices and edges prior to calling this. @@ -303,7 +298,7 @@ func (w *Walker) Update(g *AcyclicGraph) { } // Start all the new vertices. We do this at the end so that all - // the edge waiters and changes are setup above. + // the edge waiters and changes are set up above. for _, raw := range newVerts { v := raw.(Vertex) go w.walkVertex(v, w.vertexMap[v]) diff --git a/digraph/basic.go b/digraph/basic.go deleted file mode 100644 index 8dc76838d..000000000 --- a/digraph/basic.go +++ /dev/null @@ -1,89 +0,0 @@ -package digraph - -import ( - "fmt" - "strings" -) - -// BasicNode is a digraph Node that has a name and out edges -type BasicNode struct { - Name string - NodeEdges []Edge -} - -func (b *BasicNode) Edges() []Edge { - return b.NodeEdges -} - -func (b *BasicNode) AddEdge(edge Edge) { - b.NodeEdges = append(b.NodeEdges, edge) -} - -func (b *BasicNode) String() string { - if b.Name == "" { - return "Node" - } - return fmt.Sprintf("%v", b.Name) -} - -// BasicEdge is a digraph Edge that has a name, head and tail -type BasicEdge struct { - Name string - EdgeHead *BasicNode - EdgeTail *BasicNode -} - -func (b *BasicEdge) Head() Node { - return b.EdgeHead -} - -// Tail returns the end point of the Edge -func (b *BasicEdge) Tail() Node { - return b.EdgeTail -} - -func (b *BasicEdge) String() string { - if b.Name == "" { - return "Edge" - } - return fmt.Sprintf("%v", b.Name) -} - -// ParseBasic is used to parse a string in the format of: -// a -> b ; edge name -// b -> c -// Into a series of basic node and basic edges -func ParseBasic(s string) map[string]*BasicNode { - lines := strings.Split(s, "\n") - nodes := make(map[string]*BasicNode) - for _, line := range lines { - var edgeName string 
- if idx := strings.Index(line, ";"); idx >= 0 { - edgeName = strings.Trim(line[idx+1:], " \t\r\n") - line = line[:idx] - } - parts := strings.SplitN(line, "->", 2) - if len(parts) != 2 { - continue - } - head_name := strings.Trim(parts[0], " \t\r\n") - tail_name := strings.Trim(parts[1], " \t\r\n") - head := nodes[head_name] - if head == nil { - head = &BasicNode{Name: head_name} - nodes[head_name] = head - } - tail := nodes[tail_name] - if tail == nil { - tail = &BasicNode{Name: tail_name} - nodes[tail_name] = tail - } - edge := &BasicEdge{ - Name: edgeName, - EdgeHead: head, - EdgeTail: tail, - } - head.AddEdge(edge) - } - return nodes -} diff --git a/digraph/basic_test.go b/digraph/basic_test.go deleted file mode 100644 index 20584b09b..000000000 --- a/digraph/basic_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package digraph - -import ( - "fmt" - "testing" -) - -func TestParseBasic(t *testing.T) { - spec := `a -> b ; first -b -> c ; second -b -> d ; third -z -> a` - nodes := ParseBasic(spec) - if len(nodes) != 5 { - t.Fatalf("bad: %v", nodes) - } - - a := nodes["a"] - if a.Name != "a" { - t.Fatalf("bad: %v", a) - } - aEdges := a.Edges() - if len(aEdges) != 1 { - t.Fatalf("bad: %v", a.Edges()) - } - if fmt.Sprintf("%v", aEdges[0]) != "first" { - t.Fatalf("bad: %v", aEdges[0]) - } - - b := nodes["b"] - if len(b.Edges()) != 2 { - t.Fatalf("bad: %v", b.Edges()) - } - - c := nodes["c"] - if len(c.Edges()) != 0 { - t.Fatalf("bad: %v", c.Edges()) - } - - d := nodes["d"] - if len(d.Edges()) != 0 { - t.Fatalf("bad: %v", d.Edges()) - } - - z := nodes["z"] - zEdges := z.Edges() - if len(zEdges) != 1 { - t.Fatalf("bad: %v", z.Edges()) - } - if fmt.Sprintf("%v", zEdges[0]) != "Edge" { - t.Fatalf("bad: %v", zEdges[0]) - } -} diff --git a/digraph/digraph.go b/digraph/digraph.go deleted file mode 100644 index ccf311170..000000000 --- a/digraph/digraph.go +++ /dev/null @@ -1,34 +0,0 @@ -package digraph - -// Digraph is used to represent a Directed Graph. 
This means -// we have a set of nodes, and a set of edges which are directed -// from a source and towards a destination -type Digraph interface { - // Nodes provides all the nodes in the graph - Nodes() []Node - - // Sources provides all the source nodes in the graph - Sources() []Node - - // Sinks provides all the sink nodes in the graph - Sinks() []Node - - // Transpose reverses the edge directions and returns - // a new Digraph - Transpose() Digraph -} - -// Node represents a vertex in a Digraph -type Node interface { - // Edges returns the out edges for a given nod - Edges() []Edge -} - -// Edge represents a directed edge in a Digraph -type Edge interface { - // Head returns the start point of the Edge - Head() Node - - // Tail returns the end point of the Edge - Tail() Node -} diff --git a/digraph/graphviz.go b/digraph/graphviz.go deleted file mode 100644 index db6952ebb..000000000 --- a/digraph/graphviz.go +++ /dev/null @@ -1,28 +0,0 @@ -package digraph - -import ( - "fmt" - "io" -) - -// WriteDot is used to emit a GraphViz compatible definition -// for a directed graph. It can be used to dump a .dot file. 
-func WriteDot(w io.Writer, nodes []Node) error { - w.Write([]byte("digraph {\n")) - defer w.Write([]byte("}\n")) - - for _, n := range nodes { - nodeLine := fmt.Sprintf("\t\"%s\";\n", n) - - w.Write([]byte(nodeLine)) - - for _, edge := range n.Edges() { - target := edge.Tail() - line := fmt.Sprintf("\t\"%s\" -> \"%s\" [label=\"%s\"];\n", - n, target, edge) - w.Write([]byte(line)) - } - } - - return nil -} diff --git a/digraph/graphviz_test.go b/digraph/graphviz_test.go deleted file mode 100644 index 69e4ebb89..000000000 --- a/digraph/graphviz_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package digraph - -import ( - "bytes" - "strings" - "testing" -) - -func TestWriteDot(t *testing.T) { - nodes := ParseBasic(`a -> b ; foo -a -> c -b -> d -b -> e -`) - var nlist []Node - for _, n := range nodes { - nlist = append(nlist, n) - } - - buf := bytes.NewBuffer(nil) - if err := WriteDot(buf, nlist); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(string(buf.Bytes())) - expected := strings.TrimSpace(writeDotStr) - - actualLines := strings.Split(actual, "\n") - expectedLines := strings.Split(expected, "\n") - - if actualLines[0] != expectedLines[0] || - actualLines[len(actualLines)-1] != expectedLines[len(expectedLines)-1] || - len(actualLines) != len(expectedLines) { - t.Fatalf("bad: %s", actual) - } - - count := 0 - for _, el := range expectedLines[1 : len(expectedLines)-1] { - for _, al := range actualLines[1 : len(actualLines)-1] { - if el == al { - count++ - break - } - } - } - - if count != len(expectedLines)-2 { - t.Fatalf("bad: %s", actual) - } -} - -const writeDotStr = ` -digraph { - "a"; - "a" -> "b" [label="foo"]; - "a" -> "c" [label="Edge"]; - "b"; - "b" -> "d" [label="Edge"]; - "b" -> "e" [label="Edge"]; - "c"; - "d"; - "e"; -} -` diff --git a/digraph/tarjan.go b/digraph/tarjan.go deleted file mode 100644 index 2298610ed..000000000 --- a/digraph/tarjan.go +++ /dev/null @@ -1,111 +0,0 @@ -package digraph - -// sccAcct is used ot pass around 
accounting information for -// the StronglyConnectedComponents algorithm -type sccAcct struct { - ExcludeSingle bool - NextIndex int - NodeIndex map[Node]int - Stack []Node - SCC [][]Node -} - -// visit assigns an index and pushes a node onto the stack -func (s *sccAcct) visit(n Node) int { - idx := s.NextIndex - s.NodeIndex[n] = idx - s.NextIndex++ - s.push(n) - return idx -} - -// push adds a node to the stack -func (s *sccAcct) push(n Node) { - s.Stack = append(s.Stack, n) -} - -// pop removes a node from the stack -func (s *sccAcct) pop() Node { - n := len(s.Stack) - if n == 0 { - return nil - } - node := s.Stack[n-1] - s.Stack = s.Stack[:n-1] - return node -} - -// inStack checks if a node is in the stack -func (s *sccAcct) inStack(needle Node) bool { - for _, n := range s.Stack { - if n == needle { - return true - } - } - return false -} - -// StronglyConnectedComponents implements Tarjan's algorithm to -// find all the strongly connected components in a graph. This can -// be used to detected any cycles in a graph, as well as which nodes -// partipate in those cycles. excludeSingle is used to exclude strongly -// connected components of size one. 
-func StronglyConnectedComponents(nodes []Node, excludeSingle bool) [][]Node { - acct := sccAcct{ - ExcludeSingle: excludeSingle, - NextIndex: 1, - NodeIndex: make(map[Node]int, len(nodes)), - } - for _, node := range nodes { - // Recurse on any non-visited nodes - if acct.NodeIndex[node] == 0 { - stronglyConnected(&acct, node) - } - } - return acct.SCC -} - -func stronglyConnected(acct *sccAcct, node Node) int { - // Initial node visit - index := acct.visit(node) - minIdx := index - - for _, edge := range node.Edges() { - target := edge.Tail() - targetIdx := acct.NodeIndex[target] - - // Recurse on successor if not yet visited - if targetIdx == 0 { - minIdx = min(minIdx, stronglyConnected(acct, target)) - - } else if acct.inStack(target) { - // Check if the node is in the stack - minIdx = min(minIdx, targetIdx) - } - } - - // Pop the strongly connected components off the stack if - // this is a root node - if index == minIdx { - var scc []Node - for { - n := acct.pop() - scc = append(scc, n) - if n == node { - break - } - } - if !(acct.ExcludeSingle && len(scc) == 1) { - acct.SCC = append(acct.SCC, scc) - } - } - - return minIdx -} - -func min(a, b int) int { - if a <= b { - return a - } - return b -} diff --git a/digraph/tarjan_test.go b/digraph/tarjan_test.go deleted file mode 100644 index d14a75ec8..000000000 --- a/digraph/tarjan_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package digraph - -import ( - "reflect" - "sort" - "testing" -) - -func TestStronglyConnectedComponents(t *testing.T) { - nodes := ParseBasic(`a -> b -a -> c -b -> c -c -> b -c -> d -d -> e`) - var nlist []Node - for _, n := range nodes { - nlist = append(nlist, n) - } - - sccs := StronglyConnectedComponents(nlist, false) - if len(sccs) != 4 { - t.Fatalf("bad: %v", sccs) - } - - sccs = StronglyConnectedComponents(nlist, true) - if len(sccs) != 1 { - t.Fatalf("bad: %v", sccs) - } - - cycle := sccs[0] - if len(cycle) != 2 { - t.Fatalf("bad: %v", sccs) - } - - cycleNodes := make([]string, 
len(cycle)) - for i, c := range cycle { - cycleNodes[i] = c.(*BasicNode).Name - } - sort.Strings(cycleNodes) - - expected := []string{"b", "c"} - if !reflect.DeepEqual(cycleNodes, expected) { - t.Fatalf("bad: %#v", cycleNodes) - } -} - -func TestStronglyConnectedComponents2(t *testing.T) { - nodes := ParseBasic(`a -> b -a -> c -b -> d -b -> e -c -> f -c -> g -g -> a -`) - var nlist []Node - for _, n := range nodes { - nlist = append(nlist, n) - } - - sccs := StronglyConnectedComponents(nlist, true) - if len(sccs) != 1 { - t.Fatalf("bad: %v", sccs) - } - - cycle := sccs[0] - if len(cycle) != 3 { - t.Fatalf("bad: %v", sccs) - } - - cycleNodes := make([]string, len(cycle)) - for i, c := range cycle { - cycleNodes[i] = c.(*BasicNode).Name - } - sort.Strings(cycleNodes) - - expected := []string{"a", "c", "g"} - if !reflect.DeepEqual(cycleNodes, expected) { - t.Fatalf("bad: %#v", cycleNodes) - } -} diff --git a/digraph/util.go b/digraph/util.go deleted file mode 100644 index 96a09ed82..000000000 --- a/digraph/util.go +++ /dev/null @@ -1,113 +0,0 @@ -package digraph - -// DepthFirstWalk performs a depth-first traversal of the nodes -// that can be reached from the initial input set. The callback is -// invoked for each visited node, and may return false to prevent -// vising any children of the current node -func DepthFirstWalk(node Node, cb func(n Node) bool) { - frontier := []Node{node} - seen := make(map[Node]struct{}) - for len(frontier) > 0 { - // Pop the current node - n := len(frontier) - current := frontier[n-1] - frontier = frontier[:n-1] - - // Check for potential cycle - if _, ok := seen[current]; ok { - continue - } - seen[current] = struct{}{} - - // Visit with the callback - if !cb(current) { - continue - } - - // Add any new edges to visit, in reverse order - edges := current.Edges() - for i := len(edges) - 1; i >= 0; i-- { - frontier = append(frontier, edges[i].Tail()) - } - } -} - -// FilterDegree returns only the nodes with the desired -// degree. 
This can be used with OutDegree or InDegree -func FilterDegree(degree int, degrees map[Node]int) []Node { - var matching []Node - for n, d := range degrees { - if d == degree { - matching = append(matching, n) - } - } - return matching -} - -// InDegree is used to compute the in-degree of nodes -func InDegree(nodes []Node) map[Node]int { - degree := make(map[Node]int, len(nodes)) - for _, n := range nodes { - if _, ok := degree[n]; !ok { - degree[n] = 0 - } - for _, e := range n.Edges() { - degree[e.Tail()]++ - } - } - return degree -} - -// OutDegree is used to compute the in-degree of nodes -func OutDegree(nodes []Node) map[Node]int { - degree := make(map[Node]int, len(nodes)) - for _, n := range nodes { - degree[n] = len(n.Edges()) - } - return degree -} - -// Sinks is used to get the nodes with out-degree of 0 -func Sinks(nodes []Node) []Node { - return FilterDegree(0, OutDegree(nodes)) -} - -// Sources is used to get the nodes with in-degree of 0 -func Sources(nodes []Node) []Node { - return FilterDegree(0, InDegree(nodes)) -} - -// Unreachable starts at a given start node, performs -// a DFS from there, and returns the set of unreachable nodes. 
-func Unreachable(start Node, nodes []Node) []Node { - // DFS from the start ndoe - frontier := []Node{start} - seen := make(map[Node]struct{}) - for len(frontier) > 0 { - // Pop the current node - n := len(frontier) - current := frontier[n-1] - frontier = frontier[:n-1] - - // Check for potential cycle - if _, ok := seen[current]; ok { - continue - } - seen[current] = struct{}{} - - // Add any new edges to visit, in reverse order - edges := current.Edges() - for i := len(edges) - 1; i >= 0; i-- { - frontier = append(frontier, edges[i].Tail()) - } - } - - // Check for any unseen nodes - var unseen []Node - for _, node := range nodes { - if _, ok := seen[node]; !ok { - unseen = append(unseen, node) - } - } - return unseen -} diff --git a/digraph/util_test.go b/digraph/util_test.go deleted file mode 100644 index e6d359991..000000000 --- a/digraph/util_test.go +++ /dev/null @@ -1,233 +0,0 @@ -package digraph - -import ( - "reflect" - "testing" -) - -func TestDepthFirstWalk(t *testing.T) { - nodes := ParseBasic(`a -> b -a -> c -a -> d -b -> e -d -> f -e -> a ; cycle`) - root := nodes["a"] - expected := []string{ - "a", - "b", - "e", - "c", - "d", - "f", - } - index := 0 - DepthFirstWalk(root, func(n Node) bool { - name := n.(*BasicNode).Name - if expected[index] != name { - t.Fatalf("expected: %v, got %v", expected[index], name) - } - index++ - return true - }) -} - -func TestInDegree(t *testing.T) { - nodes := ParseBasic(`a -> b -a -> c -a -> d -b -> e -c -> e -d -> f`) - var nlist []Node - for _, n := range nodes { - nlist = append(nlist, n) - } - - expected := map[string]int{ - "a": 0, - "b": 1, - "c": 1, - "d": 1, - "e": 2, - "f": 1, - } - indegree := InDegree(nlist) - for n, d := range indegree { - name := n.(*BasicNode).Name - exp := expected[name] - if exp != d { - t.Fatalf("Expected %d for %s, got %d", - exp, name, d) - } - } -} - -func TestOutDegree(t *testing.T) { - nodes := ParseBasic(`a -> b -a -> c -a -> d -b -> e -c -> e -d -> f`) - var nlist []Node - for 
_, n := range nodes { - nlist = append(nlist, n) - } - - expected := map[string]int{ - "a": 3, - "b": 1, - "c": 1, - "d": 1, - "e": 0, - "f": 0, - } - outDegree := OutDegree(nlist) - for n, d := range outDegree { - name := n.(*BasicNode).Name - exp := expected[name] - if exp != d { - t.Fatalf("Expected %d for %s, got %d", - exp, name, d) - } - } -} - -func TestSinks(t *testing.T) { - nodes := ParseBasic(`a -> b -a -> c -a -> d -b -> e -c -> e -d -> f`) - var nlist []Node - for _, n := range nodes { - nlist = append(nlist, n) - } - - sinks := Sinks(nlist) - - var haveE, haveF bool - for _, n := range sinks { - name := n.(*BasicNode).Name - switch name { - case "e": - haveE = true - case "f": - haveF = true - } - } - if !haveE || !haveF { - t.Fatalf("missing sink") - } -} - -func TestSources(t *testing.T) { - nodes := ParseBasic(`a -> b -a -> c -a -> d -b -> e -c -> e -d -> f -x -> y`) - var nlist []Node - for _, n := range nodes { - nlist = append(nlist, n) - } - - sources := Sources(nlist) - if len(sources) != 2 { - t.Fatalf("bad: %v", sources) - } - - var haveA, haveX bool - for _, n := range sources { - name := n.(*BasicNode).Name - switch name { - case "a": - haveA = true - case "x": - haveX = true - } - } - if !haveA || !haveX { - t.Fatalf("missing source %v %v", haveA, haveX) - } -} - -func TestUnreachable(t *testing.T) { - nodes := ParseBasic(`a -> b -a -> c -a -> d -b -> e -c -> e -d -> f -f -> a -x -> y -y -> z`) - var nlist []Node - for _, n := range nodes { - nlist = append(nlist, n) - } - - unreached := Unreachable(nodes["a"], nlist) - if len(unreached) != 3 { - t.Fatalf("bad: %v", unreached) - } - - var haveX, haveY, haveZ bool - for _, n := range unreached { - name := n.(*BasicNode).Name - switch name { - case "x": - haveX = true - case "y": - haveY = true - case "z": - haveZ = true - } - } - if !haveX || !haveY || !haveZ { - t.Fatalf("missing %v %v %v", haveX, haveY, haveZ) - } -} - -func TestUnreachable2(t *testing.T) { - nodes := ParseBasic(`a -> b 
-a -> c -a -> d -b -> e -c -> e -d -> f -f -> a -x -> y -y -> z`) - var nlist []Node - for _, n := range nodes { - nlist = append(nlist, n) - } - - unreached := Unreachable(nodes["x"], nlist) - if len(unreached) != 6 { - t.Fatalf("bad: %v", unreached) - } - - expected := map[string]struct{}{ - "a": struct{}{}, - "b": struct{}{}, - "c": struct{}{}, - "d": struct{}{}, - "e": struct{}{}, - "f": struct{}{}, - } - out := map[string]struct{}{} - for _, n := range unreached { - name := n.(*BasicNode).Name - out[name] = struct{}{} - } - - if !reflect.DeepEqual(out, expected) { - t.Fatalf("bad: %v %v", out, expected) - } -} diff --git a/docs/README.md b/docs/README.md index 76e365990..2ed0af4b4 100644 --- a/docs/README.md +++ b/docs/README.md @@ -4,7 +4,7 @@ This directory contains some documentation about the Terraform Core codebase, aimed at readers who are interested in making code contributions. If you're looking for information on _using_ Terraform, please instead refer -to [the main Terraform CLI documentation](https://www.terraform.io/docs/cli-index.html). +to [the main Terraform CLI documentation](https://www.terraform.io/docs/cli/index.html). ## Terraform Core Architecture Documents diff --git a/docs/plugin-protocol/tfplugin5.2.proto b/docs/plugin-protocol/tfplugin5.2.proto index 4f365697a..7c5cada75 100644 --- a/docs/plugin-protocol/tfplugin5.2.proto +++ b/docs/plugin-protocol/tfplugin5.2.proto @@ -17,6 +17,7 @@ // branch or any other development branch. // syntax = "proto3"; +option go_package = "github.com/hashicorp/terraform/internal/tfplugin5"; package tfplugin5; diff --git a/flatmap/expand.go b/flatmap/expand.go deleted file mode 100644 index b9d15461e..000000000 --- a/flatmap/expand.go +++ /dev/null @@ -1,152 +0,0 @@ -package flatmap - -import ( - "fmt" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/terraform/configs/hcl2shim" -) - -// Expand takes a map and a key (prefix) and expands that value into -// a more complex structure. 
This is the reverse of the Flatten operation. -func Expand(m map[string]string, key string) interface{} { - // If the key is exactly a key in the map, just return it - if v, ok := m[key]; ok { - if v == "true" { - return true - } else if v == "false" { - return false - } - - return v - } - - // Check if the key is an array, and if so, expand the array - if v, ok := m[key+".#"]; ok { - // If the count of the key is unknown, then just put the unknown - // value in the value itself. This will be detected by Terraform - // core later. - if v == hcl2shim.UnknownVariableValue { - return v - } - - return expandArray(m, key) - } - - // Check if this is a prefix in the map - prefix := key + "." - for k := range m { - if strings.HasPrefix(k, prefix) { - return expandMap(m, prefix) - } - } - - return nil -} - -func expandArray(m map[string]string, prefix string) []interface{} { - num, err := strconv.ParseInt(m[prefix+".#"], 0, 0) - if err != nil { - panic(err) - } - - // If the number of elements in this array is 0, then return an - // empty slice as there is nothing to expand. Trying to expand it - // anyway could lead to crashes as any child maps, arrays or sets - // that no longer exist are still shown as empty with a count of 0. - if num == 0 { - return []interface{}{} - } - - // NOTE: "num" is not necessarily accurate, e.g. if a user tampers - // with state, so the following code should not crash when given a - // number of items more or less than what's given in num. The - // num key is mainly just a hint that this is a list or set. - - // The Schema "Set" type stores its values in an array format, but - // using numeric hash values instead of ordinal keys. Take the set - // of keys regardless of value, and expand them in numeric order. - // See GH-11042 for more details. 
- keySet := map[int]bool{} - computed := map[string]bool{} - for k := range m { - if !strings.HasPrefix(k, prefix+".") { - continue - } - - key := k[len(prefix)+1:] - idx := strings.Index(key, ".") - if idx != -1 { - key = key[:idx] - } - - // skip the count value - if key == "#" { - continue - } - - // strip the computed flag if there is one - if strings.HasPrefix(key, "~") { - key = key[1:] - computed[key] = true - } - - k, err := strconv.Atoi(key) - if err != nil { - panic(err) - } - keySet[int(k)] = true - } - - keysList := make([]int, 0, num) - for key := range keySet { - keysList = append(keysList, key) - } - sort.Ints(keysList) - - result := make([]interface{}, len(keysList)) - for i, key := range keysList { - keyString := strconv.Itoa(key) - if computed[keyString] { - keyString = "~" + keyString - } - result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString)) - } - - return result -} - -func expandMap(m map[string]string, prefix string) map[string]interface{} { - // Submaps may not have a '%' key, so we can't count on this value being - // here. If we don't have a count, just proceed as if we have have a map. 
- if count, ok := m[prefix+"%"]; ok && count == "0" { - return map[string]interface{}{} - } - - result := make(map[string]interface{}) - for k := range m { - if !strings.HasPrefix(k, prefix) { - continue - } - - key := k[len(prefix):] - idx := strings.Index(key, ".") - if idx != -1 { - key = key[:idx] - } - if _, ok := result[key]; ok { - continue - } - - // skip the map count value - if key == "%" { - continue - } - - result[key] = Expand(m, k[:len(prefix)+len(key)]) - } - - return result -} diff --git a/flatmap/expand_test.go b/flatmap/expand_test.go deleted file mode 100644 index 707c015da..000000000 --- a/flatmap/expand_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package flatmap - -import ( - "reflect" - "testing" - - "github.com/hashicorp/terraform/configs/hcl2shim" -) - -func TestExpand(t *testing.T) { - cases := []struct { - Map map[string]string - Key string - Output interface{} - }{ - { - Map: map[string]string{ - "foo": "bar", - "bar": "baz", - }, - Key: "foo", - Output: "bar", - }, - - { - Map: map[string]string{ - "foo.#": "2", - "foo.0": "one", - "foo.1": "two", - }, - Key: "foo", - Output: []interface{}{ - "one", - "two", - }, - }, - - { - Map: map[string]string{ - // # mismatches actual number of keys; actual number should - // "win" here, since the # is just a hint that this is a list. - "foo.#": "1", - "foo.0": "one", - "foo.1": "two", - "foo.2": "three", - }, - Key: "foo", - Output: []interface{}{ - "one", - "two", - "three", - }, - }, - - { - Map: map[string]string{ - // # mismatches actual number of keys; actual number should - // "win" here, since the # is just a hint that this is a list. 
- "foo.#": "5", - "foo.0": "one", - "foo.1": "two", - "foo.2": "three", - }, - Key: "foo", - Output: []interface{}{ - "one", - "two", - "three", - }, - }, - - { - Map: map[string]string{ - "foo.#": "1", - "foo.0.name": "bar", - "foo.0.port": "3000", - "foo.0.enabled": "true", - }, - Key: "foo", - Output: []interface{}{ - map[string]interface{}{ - "name": "bar", - "port": "3000", - "enabled": true, - }, - }, - }, - - { - Map: map[string]string{ - "foo.#": "1", - "foo.0.name": "bar", - "foo.0.ports.#": "2", - "foo.0.ports.0": "1", - "foo.0.ports.1": "2", - }, - Key: "foo", - Output: []interface{}{ - map[string]interface{}{ - "name": "bar", - "ports": []interface{}{ - "1", - "2", - }, - }, - }, - }, - - { - Map: map[string]string{ - "list_of_map.#": "2", - "list_of_map.0.%": "1", - "list_of_map.0.a": "1", - "list_of_map.1.%": "2", - "list_of_map.1.b": "2", - "list_of_map.1.c": "3", - }, - Key: "list_of_map", - Output: []interface{}{ - map[string]interface{}{ - "a": "1", - }, - map[string]interface{}{ - "b": "2", - "c": "3", - }, - }, - }, - - { - Map: map[string]string{ - "map_of_list.%": "2", - "map_of_list.list2.#": "1", - "map_of_list.list2.0": "c", - "map_of_list.list1.#": "2", - "map_of_list.list1.0": "a", - "map_of_list.list1.1": "b", - }, - Key: "map_of_list", - Output: map[string]interface{}{ - "list1": []interface{}{"a", "b"}, - "list2": []interface{}{"c"}, - }, - }, - - { - Map: map[string]string{ - "set.#": "3", - "set.1234": "a", - "set.1235": "b", - "set.1236": "c", - }, - Key: "set", - Output: []interface{}{"a", "b", "c"}, - }, - - { - Map: map[string]string{ - "computed_set.#": "1", - "computed_set.~1234.a": "a", - "computed_set.~1234.b": "b", - "computed_set.~1234.c": "c", - }, - Key: "computed_set", - Output: []interface{}{ - map[string]interface{}{"a": "a", "b": "b", "c": "c"}, - }, - }, - - { - Map: map[string]string{ - "struct.#": "1", - "struct.0.name": "hello", - "struct.0.rules.#": hcl2shim.UnknownVariableValue, - }, - Key: "struct", - Output: 
[]interface{}{ - map[string]interface{}{ - "name": "hello", - "rules": hcl2shim.UnknownVariableValue, - }, - }, - }, - - { - Map: map[string]string{ - "struct.#": "1", - "struct.0.name": "hello", - "struct.0.set.#": "0", - "struct.0.set.0.key": "value", - }, - Key: "struct", - Output: []interface{}{ - map[string]interface{}{ - "name": "hello", - "set": []interface{}{}, - }, - }, - }, - - { - Map: map[string]string{ - "empty_map_of_sets.%": "0", - "empty_map_of_sets.set1.#": "0", - "empty_map_of_sets.set1.1234": "x", - }, - Key: "empty_map_of_sets", - Output: map[string]interface{}{}, - }, - } - - for _, tc := range cases { - t.Run(tc.Key, func(t *testing.T) { - actual := Expand(tc.Map, tc.Key) - if !reflect.DeepEqual(actual, tc.Output) { - t.Errorf( - "Key: %v\nMap:\n\n%#v\n\nOutput:\n\n%#v\n\nExpected:\n\n%#v\n", - tc.Key, - tc.Map, - actual, - tc.Output) - } - }) - } -} diff --git a/flatmap/flatten.go b/flatmap/flatten.go deleted file mode 100644 index 9ff6e4265..000000000 --- a/flatmap/flatten.go +++ /dev/null @@ -1,71 +0,0 @@ -package flatmap - -import ( - "fmt" - "reflect" -) - -// Flatten takes a structure and turns into a flat map[string]string. -// -// Within the "thing" parameter, only primitive values are allowed. Structs are -// not supported. Therefore, it can only be slices, maps, primitives, and -// any combination of those together. -// -// See the tests for examples of what inputs are turned into. 
-func Flatten(thing map[string]interface{}) Map { - result := make(map[string]string) - - for k, raw := range thing { - flatten(result, k, reflect.ValueOf(raw)) - } - - return Map(result) -} - -func flatten(result map[string]string, prefix string, v reflect.Value) { - if v.Kind() == reflect.Interface { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Bool: - if v.Bool() { - result[prefix] = "true" - } else { - result[prefix] = "false" - } - case reflect.Int: - result[prefix] = fmt.Sprintf("%d", v.Int()) - case reflect.Map: - flattenMap(result, prefix, v) - case reflect.Slice: - flattenSlice(result, prefix, v) - case reflect.String: - result[prefix] = v.String() - default: - panic(fmt.Sprintf("Unknown: %s", v)) - } -} - -func flattenMap(result map[string]string, prefix string, v reflect.Value) { - for _, k := range v.MapKeys() { - if k.Kind() == reflect.Interface { - k = k.Elem() - } - - if k.Kind() != reflect.String { - panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k)) - } - - flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k)) - } -} - -func flattenSlice(result map[string]string, prefix string, v reflect.Value) { - prefix = prefix + "." 
- - result[prefix+"#"] = fmt.Sprintf("%d", v.Len()) - for i := 0; i < v.Len(); i++ { - flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i)) - } -} diff --git a/flatmap/flatten_test.go b/flatmap/flatten_test.go deleted file mode 100644 index 1aa4940f8..000000000 --- a/flatmap/flatten_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package flatmap - -import ( - "reflect" - "testing" -) - -func TestFlatten(t *testing.T) { - cases := []struct { - Input map[string]interface{} - Output map[string]string - }{ - { - Input: map[string]interface{}{ - "foo": "bar", - "bar": "baz", - }, - Output: map[string]string{ - "foo": "bar", - "bar": "baz", - }, - }, - - { - Input: map[string]interface{}{ - "foo": []string{ - "one", - "two", - }, - }, - Output: map[string]string{ - "foo.#": "2", - "foo.0": "one", - "foo.1": "two", - }, - }, - - { - Input: map[string]interface{}{ - "foo": []map[interface{}]interface{}{ - map[interface{}]interface{}{ - "name": "bar", - "port": 3000, - "enabled": true, - }, - }, - }, - Output: map[string]string{ - "foo.#": "1", - "foo.0.name": "bar", - "foo.0.port": "3000", - "foo.0.enabled": "true", - }, - }, - - { - Input: map[string]interface{}{ - "foo": []map[interface{}]interface{}{ - map[interface{}]interface{}{ - "name": "bar", - "ports": []string{ - "1", - "2", - }, - }, - }, - }, - Output: map[string]string{ - "foo.#": "1", - "foo.0.name": "bar", - "foo.0.ports.#": "2", - "foo.0.ports.0": "1", - "foo.0.ports.1": "2", - }, - }, - } - - for _, tc := range cases { - actual := Flatten(tc.Input) - if !reflect.DeepEqual(actual, Map(tc.Output)) { - t.Fatalf( - "Input:\n\n%#v\n\nOutput:\n\n%#v\n\nExpected:\n\n%#v\n", - tc.Input, - actual, - tc.Output) - } - } -} diff --git a/flatmap/map.go b/flatmap/map.go deleted file mode 100644 index 46b72c401..000000000 --- a/flatmap/map.go +++ /dev/null @@ -1,82 +0,0 @@ -package flatmap - -import ( - "strings" -) - -// Map is a wrapper around map[string]string that provides some helpers -// above it that assume the map 
is in the format that flatmap expects -// (the result of Flatten). -// -// All modifying functions such as Delete are done in-place unless -// otherwise noted. -type Map map[string]string - -// Contains returns true if the map contains the given key. -func (m Map) Contains(key string) bool { - for _, k := range m.Keys() { - if k == key { - return true - } - } - - return false -} - -// Delete deletes a key out of the map with the given prefix. -func (m Map) Delete(prefix string) { - for k, _ := range m { - match := k == prefix - if !match { - if !strings.HasPrefix(k, prefix) { - continue - } - - if k[len(prefix):len(prefix)+1] != "." { - continue - } - } - - delete(m, k) - } -} - -// Keys returns all of the top-level keys in this map -func (m Map) Keys() []string { - ks := make(map[string]struct{}) - for k, _ := range m { - idx := strings.Index(k, ".") - if idx == -1 { - idx = len(k) - } - - ks[k[:idx]] = struct{}{} - } - - result := make([]string, 0, len(ks)) - for k, _ := range ks { - result = append(result, k) - } - - return result -} - -// Merge merges the contents of the other Map into this one. -// -// This merge is smarter than a simple map iteration because it -// will fully replace arrays and other complex structures that -// are present in this map with the other map's. For example, if -// this map has a 3 element "foo" list, and m2 has a 2 element "foo" -// list, then the result will be that m has a 2 element "foo" -// list. 
-func (m Map) Merge(m2 Map) { - for _, prefix := range m2.Keys() { - m.Delete(prefix) - - for k, v := range m2 { - if strings.HasPrefix(k, prefix) { - m[k] = v - } - } - } -} diff --git a/flatmap/map_test.go b/flatmap/map_test.go deleted file mode 100644 index e3b4cb1bd..000000000 --- a/flatmap/map_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package flatmap - -import ( - "reflect" - "sort" - "testing" -) - -func TestMapContains(t *testing.T) { - cases := []struct { - Input map[string]string - Key string - Result bool - }{ - { - Input: map[string]string{ - "foo": "bar", - "bar": "nope", - }, - Key: "foo", - Result: true, - }, - - { - Input: map[string]string{ - "foo": "bar", - "bar": "nope", - }, - Key: "baz", - Result: false, - }, - } - - for i, tc := range cases { - actual := Map(tc.Input).Contains(tc.Key) - if actual != tc.Result { - t.Fatalf("case %d bad: %#v", i, tc.Input) - } - } -} - -func TestMapDelete(t *testing.T) { - m := Flatten(map[string]interface{}{ - "foo": "bar", - "routes": []map[string]string{ - map[string]string{ - "foo": "bar", - }, - }, - }) - - m.Delete("routes") - - expected := Map(map[string]string{"foo": "bar"}) - if !reflect.DeepEqual(m, expected) { - t.Fatalf("bad: %#v", m) - } -} - -func TestMapKeys(t *testing.T) { - cases := []struct { - Input map[string]string - Output []string - }{ - { - Input: map[string]string{ - "foo": "bar", - "bar.#": "bar", - "bar.0.foo": "bar", - "bar.0.baz": "bar", - }, - Output: []string{ - "bar", - "foo", - }, - }, - } - - for _, tc := range cases { - actual := Map(tc.Input).Keys() - - // Sort so we have a consistent view of the output - sort.Strings(actual) - - if !reflect.DeepEqual(actual, tc.Output) { - t.Fatalf("input: %#v\n\nbad: %#v", tc.Input, actual) - } - } -} - -func TestMapMerge(t *testing.T) { - cases := []struct { - One map[string]string - Two map[string]string - Result map[string]string - }{ - { - One: map[string]string{ - "foo": "bar", - "bar": "nope", - }, - Two: map[string]string{ - "bar": 
"baz", - "baz": "buz", - }, - Result: map[string]string{ - "foo": "bar", - "bar": "baz", - "baz": "buz", - }, - }, - } - - for i, tc := range cases { - Map(tc.One).Merge(Map(tc.Two)) - if !reflect.DeepEqual(tc.One, tc.Result) { - t.Fatalf("case %d bad: %#v", i, tc.One) - } - } -} diff --git a/go.mod b/go.mod index 64ed71395..635bdd74b 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,9 @@ module github.com/hashicorp/terraform require ( - cloud.google.com/go v0.45.1 + cloud.google.com/go/storage v1.10.0 github.com/Azure/azure-sdk-for-go v47.1.0+incompatible - github.com/Azure/go-autorest/autorest v0.11.10 + github.com/Azure/go-autorest/autorest v0.11.10 github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c // indirect github.com/ChrisTrenkamp/goxpath v0.0.0-20190607011252-c5096ec8773d // indirect github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af // indirect @@ -18,14 +18,12 @@ require ( github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect github.com/armon/go-radix v1.0.0 // indirect - github.com/aws/aws-sdk-go v1.31.9 + github.com/aws/aws-sdk-go v1.37.0 github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect github.com/bgentry/speakeasy v0.1.0 github.com/bmatcuk/doublestar v1.1.5 github.com/boltdb/bolt v1.3.1 // indirect - github.com/chzyer/logex v1.1.10 // indirect github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e - github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 // indirect github.com/coreos/bbolt v1.3.0 // indirect github.com/coreos/etcd v3.3.10+incompatible github.com/coreos/go-semver v0.2.0 // indirect @@ -36,11 +34,10 @@ require ( github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1 github.com/go-test/deep v1.0.3 github.com/gofrs/uuid v3.3.0+incompatible // indirect - github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 // indirect - github.com/golang/mock v1.3.1 - github.com/golang/protobuf 
v1.3.4 + github.com/golang/mock v1.4.4 + github.com/golang/protobuf v1.4.3 github.com/google/go-cmp v0.5.2 - github.com/google/uuid v1.1.1 + github.com/google/uuid v1.1.2 github.com/gophercloud/gophercloud v0.10.1-0.20200424014253-c3bfe50899e5 github.com/gophercloud/utils v0.0.0-20200423144003-7c72efc7435d github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect @@ -54,12 +51,12 @@ require ( github.com/hashicorp/go-azure-helpers v0.13.0 github.com/hashicorp/go-checkpoint v0.5.0 github.com/hashicorp/go-cleanhttp v0.5.1 - github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02 - github.com/hashicorp/go-hclog v0.14.2-0.20201022192508-e59fd7e11108 + github.com/hashicorp/go-getter v1.5.1 + github.com/hashicorp/go-hclog v0.15.0 github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa // indirect github.com/hashicorp/go-msgpack v0.5.4 // indirect github.com/hashicorp/go-multierror v1.1.0 - github.com/hashicorp/go-plugin v1.3.0 + github.com/hashicorp/go-plugin v1.4.0 github.com/hashicorp/go-retryablehttp v0.5.2 github.com/hashicorp/go-rootcerts v1.0.0 // indirect github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86 // indirect @@ -67,28 +64,26 @@ require ( github.com/hashicorp/go-uuid v1.0.1 github.com/hashicorp/go-version v1.2.0 github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f - github.com/hashicorp/hcl/v2 v2.7.0 + github.com/hashicorp/hcl/v2 v2.8.3-0.20210208211639-2520246c49a7 github.com/hashicorp/memberlist v0.1.0 // indirect github.com/hashicorp/serf v0.0.0-20160124182025-e4ec8cc423bb // indirect - github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7 + github.com/hashicorp/terraform-config-inspect v0.0.0-20210209133302-4fd17a0faac2 github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect - github.com/imdario/mergo v0.3.9 // indirect - github.com/jmespath/go-jmespath v0.3.0 + 
github.com/jmespath/go-jmespath v0.4.0 github.com/jonboulle/clockwork v0.1.0 // indirect github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926 github.com/jtolds/gls v4.2.1+incompatible // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 - github.com/lib/pq v1.0.0 + github.com/lib/pq v1.8.0 github.com/likexian/gokit v0.20.15 github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82 github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 // indirect github.com/masterzen/winrm v0.0.0-20200615185753-c42b5136ff88 - github.com/mattn/go-colorable v0.1.8 // indirect github.com/mattn/go-isatty v0.0.12 github.com/mattn/go-shellwords v1.0.4 github.com/miekg/dns v1.0.8 // indirect - github.com/mitchellh/cli v1.1.0 + github.com/mitchellh/cli v1.1.2 github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db github.com/mitchellh/copystructure v1.0.0 github.com/mitchellh/go-homedir v1.1.0 @@ -101,7 +96,7 @@ require ( github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c // indirect - github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 + github.com/pkg/browser v0.0.0-20201207095918-0426ae3fba23 github.com/pkg/errors v0.9.1 github.com/posener/complete v1.2.1 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 // indirect @@ -118,22 +113,23 @@ require ( github.com/xanzy/ssh-agent v0.2.1 github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 // indirect github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557 - github.com/zclconf/go-cty v1.6.2-0.20201013200640-e5225636c8c2 + github.com/zclconf/go-cty v1.7.1 github.com/zclconf/go-cty-yaml v1.0.2 go.uber.org/atomic v1.3.2 // indirect go.uber.org/multierr v1.1.0 // indirect go.uber.org/zap v1.9.1 // indirect golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 - golang.org/x/mod v0.2.0 - 
golang.org/x/net v0.0.0-20200602114024-627f9648deb9 - golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 - golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd - golang.org/x/text v0.3.2 - golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371 - google.golang.org/api v0.9.0 - google.golang.org/grpc v1.27.1 + golang.org/x/mod v0.3.0 + golang.org/x/net v0.0.0-20201110031124-69a78807bb2b + golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 + golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf + golang.org/x/text v0.3.3 + golang.org/x/tools v0.0.0-20201028111035-eafbe7b904eb + google.golang.org/api v0.34.0 + google.golang.org/grpc v1.31.1 + google.golang.org/protobuf v1.25.0 gopkg.in/ini.v1 v1.42.0 // indirect - gopkg.in/yaml.v2 v2.2.8 k8s.io/api v0.0.0-20190620084959-7cf5895f2711 k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655 k8s.io/client-go v10.0.0+incompatible @@ -143,3 +139,5 @@ require ( replace k8s.io/client-go => k8s.io/client-go v0.0.0-20190620085101-78d2af792bab go 1.14 + +replace google.golang.org/grpc v1.31.1 => google.golang.org/grpc v1.27.1 diff --git a/go.sum b/go.sum index ed806984c..8635bcb93 100644 --- a/go.sum +++ b/go.sum @@ -5,8 +5,35 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1 h1:lRi0CHyU+ytlvylOlFKKq0af6JncuyoRh1J+QJBqQx0= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= 
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= +cloud.google.com/go/storage 
v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v45.0.0+incompatible h1:/bZYPaJLCqXeCqQqEeEIQg/p7RNafOhaVFhC6IWxZ/8= github.com/Azure/azure-sdk-for-go v45.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v47.1.0+incompatible h1:D6MsWmsxF+pEjN/yZDyKXoUrsamdBdTlPedIgBlvVx4= @@ -50,6 +77,12 @@ github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022 h1:y8Gs8CzNf github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4= github.com/ChrisTrenkamp/goxpath v0.0.0-20190607011252-c5096ec8773d h1:W1diKnDQkXxNDhghdBSbQ4LI/E1aJNTwpqPp3KtlB8w= github.com/ChrisTrenkamp/goxpath v0.0.0-20190607011252-c5096ec8773d/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4= +github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= +github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= +github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= @@ -94,6 +127,8 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI github.com/aws/aws-sdk-go 
v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.31.9 h1:n+b34ydVfgC30j0Qm69yaapmjejQPW2BoDBX7Uy/tLI= github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.37.0 h1:GzFnhOIsrGyQ69s7VgqtrG2BG8v7X7vwB3Xpbd/DBBk= +github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= @@ -115,6 +150,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coreos/bbolt v1.3.0 h1:HIgH5xUWXT914HCI671AxuTTqjj64UOFr7pHn48LUTI= github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= @@ -141,7 +177,9 @@ github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1 h1:r1oACdS2XYiA github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1/go.mod h1:lcy9/2gH1jn/VCLouHA6tOEwLoNVd4GW6zhuKLmHC2Y= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod 
h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -153,6 +191,9 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= @@ -176,19 +217,38 @@ github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= -github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= @@ -197,6 +257,10 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1 
h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= @@ -207,11 +271,21 @@ github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= 
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -256,6 +330,11 @@ github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02/go.mod h1:7q github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.14.2-0.20201022192508-e59fd7e11108 h1:njjwC6QCapHoavbrnzrenpovKeHauePE1wPVdflPvRU= github.com/hashicorp/go-hclog v0.14.2-0.20201022192508-e59fd7e11108/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-getter v1.5.1 h1:lM9sM02nvEApQGFgkXxWbhfqtyN+AyhQmi+MaMdBDOI= +github.com/hashicorp/go-getter v1.5.1/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.15.0 h1:qMuK0wxsoW4D0ddCCYwPSTm4KQv1X1ke3WmPWZ0Mvsk= +github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa h1:0nA8i+6Rwqaq9xlpmVxxTwk6rxiEhX+E6Wh4vPNHiS8= github.com/hashicorp/go-immutable-radix 
v0.0.0-20180129170900-7f3cd4390caa/go.mod h1:6ij3Z20p+OhOkCSrA0gImAWoHYQRGbnlcuk6XYTiaRw= github.com/hashicorp/go-msgpack v0.5.4 h1:SFT72YqIkOcLdWJUYcriVX7hbrZpwc/f7h8aW2NUqrA= @@ -266,6 +345,8 @@ github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-plugin v1.3.0 h1:4d/wJojzvHV1I4i/rrjVaeuyxWrLzDE1mDCyDy8fXS8= github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= +github.com/hashicorp/go-plugin v1.4.0 h1:b0O7rs5uiJ99Iu9HugEzsM67afboErkHUWddUSpUO3A= +github.com/hashicorp/go-plugin v1.4.0/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= github.com/hashicorp/go-retryablehttp v0.5.2 h1:AoISa4P4IsW0/m4T6St8Yw38gTl5GtBAgfkhYh1xAz4= github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI= @@ -291,14 +372,18 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws= github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= -github.com/hashicorp/hcl/v2 v2.7.0 h1:IU8qz5UzZ1po3M1D9/Kq6S5zbDGVfI9bnzmC1ogKKmI= -github.com/hashicorp/hcl/v2 v2.7.0/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= +github.com/hashicorp/hcl/v2 v2.8.2 h1:wmFle3D1vu0okesm8BTLVDyJ6/OL9DCLUwn0b2OptiY= +github.com/hashicorp/hcl/v2 v2.8.2/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= +github.com/hashicorp/hcl/v2 v2.8.3-0.20210208211639-2520246c49a7 h1:9YW4rFk/VhcwWW2Mu0SuMO4/ygk6dg7EIRqPd2avDXQ= +github.com/hashicorp/hcl/v2 v2.8.3-0.20210208211639-2520246c49a7/go.mod 
h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= github.com/hashicorp/memberlist v0.1.0 h1:qSsCiC0WYD39lbSitKNt40e30uorm2Ss/d4JGU1hzH8= github.com/hashicorp/memberlist v0.1.0/go.mod h1:ncdBp14cuox2iFOq3kDiquKU6fqsTBc3W6JvZwjxxsE= github.com/hashicorp/serf v0.0.0-20160124182025-e4ec8cc423bb h1:ZbgmOQt8DOg796figP87/EFCVx2v2h9yRvwHF/zceX4= github.com/hashicorp/serf v0.0.0-20160124182025-e4ec8cc423bb/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7 h1:Pc5TCv9mbxFN6UVX0LH6CpQrdTM5YjbVI2w15237Pjk= github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= +github.com/hashicorp/terraform-config-inspect v0.0.0-20210209133302-4fd17a0faac2 h1:l+bLFvHjqtgNQwWxwrFX9PemGAAO2P1AGZM7zlMNvCs= +github.com/hashicorp/terraform-config-inspect v0.0.0-20210209133302-4fd17a0faac2/go.mod h1:Z0Nnk4+3Cy89smEbrq+sl1bxc9198gIP4I7wcQF6Kqs= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= @@ -306,14 +391,21 @@ github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKe github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= +github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/ianlancetaylor/demangle 
v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926 h1:kie3qOosvRKqwij2HGzXWffwpXvcqfPPXRUw8I4F/mg= @@ -323,6 +415,8 @@ github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBv github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report 
v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -341,8 +435,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/likexian/gokit v0.0.0-20190309162924-0a377eecf7aa/go.mod h1:QdfYv6y6qPA9pbBA2qXtoT8BMKha6UyNbxWGWl/9Jfk= github.com/likexian/gokit v0.0.0-20190418170008-ace88ad0983b/go.mod h1:KKqSnk/VVSW8kEyO2vVCXoanzEutKdlBAPohmGXkxCk= github.com/likexian/gokit v0.0.0-20190501133040-e77ea8b19cdc/go.mod h1:3kvONayqCaj+UgrRZGpgfXzHdMYCAO0KAt4/8n0L57Y= @@ -361,9 +455,8 @@ github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786/go.mod h1:kCEb github.com/masterzen/winrm v0.0.0-20200615185753-c42b5136ff88 h1:cxuVcCvCLD9yYDbRCWw0jSgh1oT6P6mv3aJDKK5o7X4= github.com/masterzen/winrm v0.0.0-20200615185753-c42b5136ff88/go.mod h1:a2HXwefeat3evJHxFXSayvRHpYEPJYtErl4uIzfaUqY= github.com/mattn/go-colorable v0.0.9/go.mod 
h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -377,8 +470,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.8 h1:Zi8HNpze3NeRWH1PQV6O71YcvJRQ6j0lORO6DAEmAAI= github.com/miekg/dns v1.0.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.1.0 h1:tEElEatulEHDeedTxwckzyYMA5c86fbmNIUL1hBIiTg= -github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/cli v1.1.2 h1:PvH+lL2B7IQ101xQL63Of8yFS2y+aDlsFcsqNc+u/Kw= +github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= @@ -433,8 +526,8 @@ github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db/go.mod h1 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98= -github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= +github.com/pkg/browser v0.0.0-20201207095918-0426ae3fba23 h1:dofHuld+js7eKSemxqTVIo8yRlpRw+H1SdpzZxWruBc= +github.com/pkg/browser v0.0.0-20201207095918-0426ae3fba23/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -457,6 +550,7 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= @@ -483,8 +577,11 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify 
v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d h1:Z4EH+5EffvBEhh37F0C0DnpklTMh00JOkjW5zK3ofBI= github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d/go.mod h1:BSTlc8jOjh0niykqEGVXOLXdi9o0r0kR8tCYiMvjFgw= github.com/tencentcloud/tencentcloud-sdk-go v3.0.82+incompatible h1:5Td2b0yfaOvw9M9nZ5Oav6Li9bxUNxt4DgxMfIPpsa0= @@ -497,8 +594,8 @@ github.com/tombuildsstuff/giovanni v0.14.0 h1:vBgZJHNs8p42Nj4GaffPe7nzs2Z2qIyKUN github.com/tombuildsstuff/giovanni v0.14.0/go.mod h1:0TZugJPEtqzPlMpuJHYfXY6Dq2uLPrXf98D2XQSxNbA= github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5 h1:cMjKdf4PxEBN9K5HaD9UMW8gkTbM0kMzkTa9SJe0WNQ= github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= -github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= -github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= +github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vmihailenco/msgpack v3.3.3+incompatible h1:wapg9xDUZDzGCNFlwc5SqI1rvcciqcxEHac4CYj89xI= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= @@ -511,16 +608,24 @@ github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 h1:MPPkRncZLN9Kh4M 
github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557 h1:Jpn2j6wHkC9wJv5iMfJhKqrZJx3TahFx+7sbZ7zQdxs= github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.6.2-0.20201013200640-e5225636c8c2 h1:17Cl7LwqZt2t95i3qzcMlOWH8LfzMqaJDjjRV9N/ae4= -github.com/zclconf/go-cty v1.6.2-0.20201013200640-e5225636c8c2/go.mod h1:VDR4+I79ubFBGm1uJac1226K5yANQFHeauxPBoP54+o= +github.com/zclconf/go-cty v1.7.1 h1:AvsC01GMhMLFL8CgEYdHGM+yLnnDOwhPAYcgTkeF0Gw= +github.com/zclconf/go-cty v1.7.1/go.mod h1:VDR4+I79ubFBGm1uJac1226K5yANQFHeauxPBoP54+o= github.com/zclconf/go-cty-yaml v1.0.2 h1:dNyg4QLTrv2IfJpm7Wtxi55ed5gLGOlPrZ6kMd51hY0= github.com/zclconf/go-cty-yaml v1.0.2/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4 
h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= @@ -533,6 +638,7 @@ golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -541,17 +647,41 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 h1:pLI5jrR7OSLijeIDcmRxNmw2api+jEfxLoykJVice/E= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp 
v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod 
h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -570,27 +700,49 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net 
v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -608,23 +760,50 @@ golang.org/x/sys v0.0.0-20190509141414-a5b02f93d862/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 h1:ZBzSG/7F4eNKz2L3GE9o300RX0Az1Bw5HF7PDraD+qU= golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= +golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 
h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -637,19 +816,67 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371 h1:Cjq6sG3gnKDchzWy7ouGQklhxMtWvh4AhSNJ0qGIeo4= golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858 h1:xLt+iB5ksWcZVxqc+g9K41ZHy+6MKWfXCDsjSThnsPA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201028111035-eafbe7b904eb h1:KVWk3RW1AZlxWum4tYqegLgwJHb5oouozcGM8HfNQaw= +golang.org/x/tools v0.0.0-20201028111035-eafbe7b904eb/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod 
h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.34.0 h1:k40adF3uR+6x/+hO5Dh4ZFUqFp67vxvbpafFiJxl10A= +google.golang.org/api v0.34.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -657,6 +884,8 @@ google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -666,19 +895,63 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto 
v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d h1:92D1fum1bJLKSdr11OJ+54YeCMCGYIygTA7R/YZxH5M= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -691,12 +964,18 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.0.0-20190620084959-7cf5895f2711 h1:BblVYz/wE5WtBsD/Gvu54KyBUTJMflolzc5I2DTvh50= k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A= k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA= @@ -716,6 +995,8 @@ k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3 k8s.io/utils v0.0.0-20200411171748-3d5a2fe318e4 h1:vEYeh6f+jz98bCG4BHRQ733tuZpjzsJ+C/xv8awA0qM= k8s.io/utils v0.0.0-20200411171748-3d5a2fe318e4/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/help.go b/help.go index 64eb03273..16bc32095 100644 --- a/help.go 
+++ b/help.go @@ -41,7 +41,7 @@ func helpFunc(commands map[string]cli.CommandFactory) string { sort.Strings(otherCommands) // The output produced by this is included in the docs at - // website/source/docs/commands/index.html.markdown; if you + // website/source/docs/cli/commands/index.html.markdown; if you // change this then consider updating that to match. helpText := fmt.Sprintf(` Usage: terraform [global options] [args] diff --git a/helper/README.md b/helper/README.md deleted file mode 100644 index 2bcbe88e4..000000000 --- a/helper/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Legacy Helper Libraries - -The packages in this directory are all legacy code. Some of them are legacy -because they are now maintained in -[the Terraform SDK](https://github.com/hashicorp/terraform-plugin-sdk), -while others are just obsolete codepaths that we intend to migrate away -from over time. - -Avoid using functions from packages under `helper/` in new projects. diff --git a/helper/experiment/experiment.go b/helper/experiment/experiment.go deleted file mode 100644 index 72fdeaf9e..000000000 --- a/helper/experiment/experiment.go +++ /dev/null @@ -1,158 +0,0 @@ -// experiment package contains helper functions for tracking experimental -// features throughout Terraform. -// -// This package should be used for creating, enabling, querying, and deleting -// experimental features. By unifying all of that onto a single interface, -// we can have the Go compiler help us by enforcing every place we touch -// an experimental feature. -// -// To create a new experiment: -// -// 1. Add the experiment to the global vars list below, prefixed with X_ -// -// 2. Add the experiment variable to the All listin the init() function -// -// 3. Use it! -// -// To remove an experiment: -// -// 1. Delete the experiment global var. -// -// 2. Try to compile and fix all the places where the var was referenced. -// -// To use an experiment: -// -// 1. 
Use Flag() if you want the experiment to be available from the CLI. -// -// 2. Use Enabled() to check whether it is enabled. -// -// As a general user: -// -// 1. The `-Xexperiment-name` flag -// 2. The `TF_X_` env var. -// 3. The `TF_X_FORCE` env var can be set to force an experimental feature -// without human verifications. -// -package experiment - -import ( - "flag" - "fmt" - "os" - "strconv" - "strings" - "sync" -) - -// The experiments that are available are listed below. Any package in -// Terraform defining an experiment should define the experiments below. -// By keeping them all within the experiment package we force a single point -// of definition and use. This allows the compiler to enforce references -// so it becomes easy to remove the features. -var ( - // Shadow graph. This is already on by default. Disabling it will be - // allowed for awhile in order for it to not block operations. - X_shadow = newBasicID("shadow", "SHADOW", false) - - // Concise plan diff output - X_concise_diff = newBasicID("concise_diff", "CONCISE_DIFF", true) -) - -// Global variables this package uses because we are a package -// with global state. -var ( - // all is the list of all experiements. Do not modify this. - All []ID - - // enabled keeps track of what flags have been enabled - enabled map[string]bool - enabledLock sync.Mutex - - // Hidden "experiment" that forces all others to be on without verification - x_force = newBasicID("force", "FORCE", false) -) - -func init() { - // The list of all experiments, update this when an experiment is added. - All = []ID{ - X_shadow, - X_concise_diff, - x_force, - } - - // Load - reload() -} - -// reload is used by tests to reload the global state. This is called by -// init publicly. 
-func reload() { - // Initialize - enabledLock.Lock() - enabled = make(map[string]bool) - enabledLock.Unlock() - - // Set defaults and check env vars - for _, id := range All { - // Get the default value - def := id.Default() - - // If we set it in the env var, default it to true - key := fmt.Sprintf("TF_X_%s", strings.ToUpper(id.Env())) - if v := os.Getenv(key); v != "" { - def = v != "0" - } - - // Set the default - SetEnabled(id, def) - } -} - -// Enabled returns whether an experiment has been enabled or not. -func Enabled(id ID) bool { - enabledLock.Lock() - defer enabledLock.Unlock() - return enabled[id.Flag()] -} - -// SetEnabled sets an experiment to enabled/disabled. Please check with -// the experiment docs for when calling this actually affects the experiment. -func SetEnabled(id ID, v bool) { - enabledLock.Lock() - defer enabledLock.Unlock() - enabled[id.Flag()] = v -} - -// Force returns true if the -Xforce of TF_X_FORCE flag is present, which -// advises users of this package to not verify with the user that they want -// experimental behavior and to just continue with it. -func Force() bool { - return Enabled(x_force) -} - -// Flag configures the given FlagSet with the flags to configure -// all active experiments. -func Flag(fs *flag.FlagSet) { - for _, id := range All { - desc := id.Flag() - key := fmt.Sprintf("X%s", id.Flag()) - fs.Var(&idValue{X: id}, key, desc) - } -} - -// idValue implements flag.Value for setting the enabled/disabled state -// of an experiment from the CLI. 
-type idValue struct { - X ID -} - -func (v *idValue) IsBoolFlag() bool { return true } -func (v *idValue) String() string { return strconv.FormatBool(Enabled(v.X)) } -func (v *idValue) Set(raw string) error { - b, err := strconv.ParseBool(raw) - if err == nil { - SetEnabled(v.X, b) - } - - return err -} diff --git a/helper/experiment/experiment_test.go b/helper/experiment/experiment_test.go deleted file mode 100644 index 32055c2c8..000000000 --- a/helper/experiment/experiment_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package experiment - -import ( - "flag" - "fmt" - "os" - "testing" -) - -// Test experiments -var ( - X_test1 = newBasicID("test1", "TEST1", false) - X_test2 = newBasicID("test2", "TEST2", true) -) - -// Reinitializes the package to a clean slate -func testReinit() { - All = []ID{X_test1, X_test2, x_force} - reload() -} - -func init() { - testReinit() - - // Clear all env vars so they don't affect tests - for _, id := range All { - os.Unsetenv(fmt.Sprintf("TF_X_%s", id.Env())) - } -} - -func TestDefault(t *testing.T) { - testReinit() - - if Enabled(X_test1) { - t.Fatal("test1 should not be enabled") - } - - if !Enabled(X_test2) { - t.Fatal("test2 should be enabled") - } -} - -func TestEnv(t *testing.T) { - os.Setenv("TF_X_TEST2", "0") - defer os.Unsetenv("TF_X_TEST2") - - testReinit() - - if Enabled(X_test2) { - t.Fatal("test2 should be enabled") - } -} - -func TestFlag(t *testing.T) { - testReinit() - - // Verify default - if !Enabled(X_test2) { - t.Fatal("test2 should be enabled") - } - - // Setup a flag set - fs := flag.NewFlagSet("test", flag.ContinueOnError) - Flag(fs) - fs.Parse([]string{"-Xtest2=false"}) - - if Enabled(X_test2) { - t.Fatal("test2 should not be enabled") - } -} - -func TestFlag_overEnv(t *testing.T) { - os.Setenv("TF_X_TEST2", "1") - defer os.Unsetenv("TF_X_TEST2") - - testReinit() - - // Verify default - if !Enabled(X_test2) { - t.Fatal("test2 should be enabled") - } - - // Setup a flag set - fs := flag.NewFlagSet("test", 
flag.ContinueOnError) - Flag(fs) - fs.Parse([]string{"-Xtest2=false"}) - - if Enabled(X_test2) { - t.Fatal("test2 should not be enabled") - } -} - -func TestForce(t *testing.T) { - os.Setenv("TF_X_FORCE", "1") - defer os.Unsetenv("TF_X_FORCE") - - testReinit() - - if !Force() { - t.Fatal("should force") - } -} - -func TestForce_flag(t *testing.T) { - os.Unsetenv("TF_X_FORCE") - - testReinit() - - // Setup a flag set - fs := flag.NewFlagSet("test", flag.ContinueOnError) - Flag(fs) - fs.Parse([]string{"-Xforce"}) - - if !Force() { - t.Fatal("should force") - } -} diff --git a/helper/experiment/id.go b/helper/experiment/id.go deleted file mode 100644 index 8e2f70732..000000000 --- a/helper/experiment/id.go +++ /dev/null @@ -1,34 +0,0 @@ -package experiment - -// ID represents an experimental feature. -// -// The global vars defined on this package should be used as ID values. -// This interface is purposely not implement-able outside of this package -// so that we can rely on the Go compiler to enforce all experiment references. -type ID interface { - Env() string - Flag() string - Default() bool - - unexported() // So the ID can't be implemented externally. -} - -// basicID implements ID. -type basicID struct { - EnvValue string - FlagValue string - DefaultValue bool -} - -func newBasicID(flag, env string, def bool) ID { - return &basicID{ - EnvValue: env, - FlagValue: flag, - DefaultValue: def, - } -} - -func (id *basicID) Env() string { return id.EnvValue } -func (id *basicID) Flag() string { return id.FlagValue } -func (id *basicID) Default() bool { return id.DefaultValue } -func (id *basicID) unexported() {} diff --git a/helper/plugin/doc.go b/helper/plugin/doc.go deleted file mode 100644 index 82b5937bf..000000000 --- a/helper/plugin/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Package plugin contains types and functions to help Terraform plugins -// implement the plugin rpc interface. 
-// The primary Provider type will be responsible for converting from the grpc -// wire protocol to the types and methods known to the provider -// implementations. -package plugin diff --git a/helper/plugin/grpc_provider.go b/helper/plugin/grpc_provider.go deleted file mode 100644 index 06ebaf421..000000000 --- a/helper/plugin/grpc_provider.go +++ /dev/null @@ -1,1436 +0,0 @@ -package plugin - -import ( - "encoding/json" - "fmt" - "log" - "strconv" - - "github.com/zclconf/go-cty/cty" - ctyconvert "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/msgpack" - context "golang.org/x/net/context" - - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/helper/schema" - proto "github.com/hashicorp/terraform/internal/tfplugin5" - "github.com/hashicorp/terraform/plans/objchange" - "github.com/hashicorp/terraform/plugin/convert" - "github.com/hashicorp/terraform/terraform" -) - -const newExtraKey = "_new_extra_shim" - -// NewGRPCProviderServerShim wraps a terraform.ResourceProvider in a -// proto.ProviderServer implementation. If the provided provider is not a -// *schema.Provider, this will return nil, -func NewGRPCProviderServerShim(p terraform.ResourceProvider) *GRPCProviderServer { - sp, ok := p.(*schema.Provider) - if !ok { - return nil - } - - return &GRPCProviderServer{ - provider: sp, - } -} - -// GRPCProviderServer handles the server, or plugin side of the rpc connection. 
-type GRPCProviderServer struct { - provider *schema.Provider -} - -func (s *GRPCProviderServer) GetSchema(_ context.Context, req *proto.GetProviderSchema_Request) (*proto.GetProviderSchema_Response, error) { - // Here we are certain that the provider is being called through grpc, so - // make sure the feature flag for helper/schema is set - schema.SetProto5() - - resp := &proto.GetProviderSchema_Response{ - ResourceSchemas: make(map[string]*proto.Schema), - DataSourceSchemas: make(map[string]*proto.Schema), - } - - resp.Provider = &proto.Schema{ - Block: convert.ConfigSchemaToProto(s.getProviderSchemaBlock()), - } - - resp.ProviderMeta = &proto.Schema{ - Block: convert.ConfigSchemaToProto(s.getProviderMetaSchemaBlock()), - } - - for typ, res := range s.provider.ResourcesMap { - resp.ResourceSchemas[typ] = &proto.Schema{ - Version: int64(res.SchemaVersion), - Block: convert.ConfigSchemaToProto(res.CoreConfigSchema()), - } - } - - for typ, dat := range s.provider.DataSourcesMap { - resp.DataSourceSchemas[typ] = &proto.Schema{ - Version: int64(dat.SchemaVersion), - Block: convert.ConfigSchemaToProto(dat.CoreConfigSchema()), - } - } - - return resp, nil -} - -func (s *GRPCProviderServer) getProviderSchemaBlock() *configschema.Block { - return schema.InternalMap(s.provider.Schema).CoreConfigSchema() -} - -func (s *GRPCProviderServer) getProviderMetaSchemaBlock() *configschema.Block { - return schema.InternalMap(s.provider.ProviderMetaSchema).CoreConfigSchema() -} - -func (s *GRPCProviderServer) getResourceSchemaBlock(name string) *configschema.Block { - res := s.provider.ResourcesMap[name] - return res.CoreConfigSchema() -} - -func (s *GRPCProviderServer) getDatasourceSchemaBlock(name string) *configschema.Block { - dat := s.provider.DataSourcesMap[name] - return dat.CoreConfigSchema() -} - -func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *proto.PrepareProviderConfig_Request) (*proto.PrepareProviderConfig_Response, error) { - resp := 
&proto.PrepareProviderConfig_Response{} - - schemaBlock := s.getProviderSchemaBlock() - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // lookup any required, top-level attributes that are Null, and see if we - // have a Default value available. - configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { - // we're only looking for top-level attributes - if len(path) != 1 { - return val, nil - } - - // nothing to do if we already have a value - if !val.IsNull() { - return val, nil - } - - // get the Schema definition for this attribute - getAttr, ok := path[0].(cty.GetAttrStep) - // these should all exist, but just ignore anything strange - if !ok { - return val, nil - } - - attrSchema := s.provider.Schema[getAttr.Name] - // continue to ignore anything that doesn't match - if attrSchema == nil { - return val, nil - } - - // this is deprecated, so don't set it - if attrSchema.Deprecated != "" || attrSchema.Removed != "" { - return val, nil - } - - // find a default value if it exists - def, err := attrSchema.DefaultValue() - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) - return val, err - } - - // no default - if def == nil { - return val, nil - } - - // create a cty.Value and make sure it's the correct type - tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) - - // helper/schema used to allow setting "" to a bool - if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { - // return a warning about the conversion - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name) - tmpVal = cty.False - } - - val, err = ctyconvert.Convert(tmpVal, val.Type()) - if err != nil { - resp.Diagnostics = 
convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) - } - - return val, err - }) - if err != nil { - // any error here was already added to the diagnostics - return resp, nil - } - - configVal, err = schemaBlock.CoerceValue(configVal) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - - warns, errs := s.provider.Validate(config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) - - preparedConfigMP, err := msgpack.Marshal(configVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - resp.PreparedConfig = &proto.DynamicValue{Msgpack: preparedConfigMP} - - return resp, nil -} - -func (s *GRPCProviderServer) ValidateResourceTypeConfig(_ context.Context, req *proto.ValidateResourceTypeConfig_Request) (*proto.ValidateResourceTypeConfig_Response, error) { - resp := &proto.ValidateResourceTypeConfig_Response{} - - schemaBlock := s.getResourceSchemaBlock(req.TypeName) - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - - warns, errs := s.provider.ValidateResource(req.TypeName, config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) - - return resp, nil -} - -func (s *GRPCProviderServer) ValidateDataSourceConfig(_ context.Context, req 
*proto.ValidateDataSourceConfig_Request) (*proto.ValidateDataSourceConfig_Response, error) { - resp := &proto.ValidateDataSourceConfig_Response{} - - schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - - warns, errs := s.provider.ValidateDataSource(req.TypeName, config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) - - return resp, nil -} - -func (s *GRPCProviderServer) UpgradeResourceState(_ context.Context, req *proto.UpgradeResourceState_Request) (*proto.UpgradeResourceState_Response, error) { - resp := &proto.UpgradeResourceState_Response{} - - res := s.provider.ResourcesMap[req.TypeName] - schemaBlock := s.getResourceSchemaBlock(req.TypeName) - - version := int(req.Version) - - jsonMap := map[string]interface{}{} - var err error - - switch { - // We first need to upgrade a flatmap state if it exists. - // There should never be both a JSON and Flatmap state in the request. - case len(req.RawState.Flatmap) > 0: - jsonMap, version, err = s.upgradeFlatmapState(version, req.RawState.Flatmap, res) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - // if there's a JSON state, we need to decode it. 
- case len(req.RawState.Json) > 0: - err = json.Unmarshal(req.RawState.Json, &jsonMap) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - default: - log.Println("[DEBUG] no state provided to upgrade") - return resp, nil - } - - // complete the upgrade of the JSON states - jsonMap, err = s.upgradeJSONState(version, jsonMap, res) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // The provider isn't required to clean out removed fields - s.removeAttributes(jsonMap, schemaBlock.ImpliedType()) - - // now we need to turn the state into the default json representation, so - // that it can be re-decoded using the actual schema. - val, err := schema.JSONMapToStateValue(jsonMap, schemaBlock) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // Now we need to make sure blocks are represented correctly, which means - // that missing blocks are empty collections, rather than null. - // First we need to CoerceValue to ensure that all object types match. - val, err = schemaBlock.CoerceValue(val) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - // Normalize the value and fill in any missing blocks. - val = objchange.NormalizeObjectFromLegacySDK(val, schemaBlock) - - // encode the final state to the expected msgpack format - newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - resp.UpgradedState = &proto.DynamicValue{Msgpack: newStateMP} - return resp, nil -} - -// upgradeFlatmapState takes a legacy flatmap state, upgrades it using Migrate -// state if necessary, and converts it to the new JSON state format decoded as a -// map[string]interface{}. 
-// upgradeFlatmapState returns the json map along with the corresponding schema -// version. -func (s *GRPCProviderServer) upgradeFlatmapState(version int, m map[string]string, res *schema.Resource) (map[string]interface{}, int, error) { - // this will be the version we've upgraded so, defaulting to the given - // version in case no migration was called. - upgradedVersion := version - - // first determine if we need to call the legacy MigrateState func - requiresMigrate := version < res.SchemaVersion - - schemaType := res.CoreConfigSchema().ImpliedType() - - // if there are any StateUpgraders, then we need to only compare - // against the first version there - if len(res.StateUpgraders) > 0 { - requiresMigrate = version < res.StateUpgraders[0].Version - } - - if requiresMigrate && res.MigrateState == nil { - // Providers were previously allowed to bump the version - // without declaring MigrateState. - // If there are further upgraders, then we've only updated that far. - if len(res.StateUpgraders) > 0 { - schemaType = res.StateUpgraders[0].Type - upgradedVersion = res.StateUpgraders[0].Version - } - } else if requiresMigrate { - is := &terraform.InstanceState{ - ID: m["id"], - Attributes: m, - Meta: map[string]interface{}{ - "schema_version": strconv.Itoa(version), - }, - } - - is, err := res.MigrateState(version, is, s.provider.Meta()) - if err != nil { - return nil, 0, err - } - - // re-assign the map in case there was a copy made, making sure to keep - // the ID - m := is.Attributes - m["id"] = is.ID - - // if there are further upgraders, then we've only updated that far - if len(res.StateUpgraders) > 0 { - schemaType = res.StateUpgraders[0].Type - upgradedVersion = res.StateUpgraders[0].Version - } - } else { - // the schema version may be newer than the MigrateState functions - // handled and older than the current, but still stored in the flatmap - // form. If that's the case, we need to find the correct schema type to - // convert the state. 
- for _, upgrader := range res.StateUpgraders { - if upgrader.Version == version { - schemaType = upgrader.Type - break - } - } - } - - // now we know the state is up to the latest version that handled the - // flatmap format state. Now we can upgrade the format and continue from - // there. - newConfigVal, err := hcl2shim.HCL2ValueFromFlatmap(m, schemaType) - if err != nil { - return nil, 0, err - } - - jsonMap, err := schema.StateValueToJSONMap(newConfigVal, schemaType) - return jsonMap, upgradedVersion, err -} - -func (s *GRPCProviderServer) upgradeJSONState(version int, m map[string]interface{}, res *schema.Resource) (map[string]interface{}, error) { - var err error - - for _, upgrader := range res.StateUpgraders { - if version != upgrader.Version { - continue - } - - m, err = upgrader.Upgrade(m, s.provider.Meta()) - if err != nil { - return nil, err - } - version++ - } - - return m, nil -} - -// Remove any attributes no longer present in the schema, so that the json can -// be correctly decoded. -func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) { - // we're only concerned with finding maps that corespond to object - // attributes - switch v := v.(type) { - case []interface{}: - // If these aren't blocks the next call will be a noop - if ty.IsListType() || ty.IsSetType() { - eTy := ty.ElementType() - for _, eV := range v { - s.removeAttributes(eV, eTy) - } - } - return - case map[string]interface{}: - // map blocks aren't yet supported, but handle this just in case - if ty.IsMapType() { - eTy := ty.ElementType() - for _, eV := range v { - s.removeAttributes(eV, eTy) - } - return - } - - if ty == cty.DynamicPseudoType { - log.Printf("[DEBUG] ignoring dynamic block: %#v\n", v) - return - } - - if !ty.IsObjectType() { - // This shouldn't happen, and will fail to decode further on, so - // there's no need to handle it here. 
- log.Printf("[WARN] unexpected type %#v for map in json state", ty) - return - } - - attrTypes := ty.AttributeTypes() - for attr, attrV := range v { - attrTy, ok := attrTypes[attr] - if !ok { - log.Printf("[DEBUG] attribute %q no longer present in schema", attr) - delete(v, attr) - continue - } - - s.removeAttributes(attrV, attrTy) - } - } -} - -func (s *GRPCProviderServer) Stop(_ context.Context, _ *proto.Stop_Request) (*proto.Stop_Response, error) { - resp := &proto.Stop_Response{} - - err := s.provider.Stop() - if err != nil { - resp.Error = err.Error() - } - - return resp, nil -} - -func (s *GRPCProviderServer) Configure(_ context.Context, req *proto.Configure_Request) (*proto.Configure_Response, error) { - resp := &proto.Configure_Response{} - - schemaBlock := s.getProviderSchemaBlock() - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - s.provider.TerraformVersion = req.TerraformVersion - - // Ensure there are no nulls that will cause helper/schema to panic. - if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - err = s.provider.Configure(config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - - return resp, nil -} - -func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadResource_Request) (*proto.ReadResource_Response, error) { - resp := &proto.ReadResource_Response{ - // helper/schema did previously handle private data during refresh, but - // core is now going to expect this to be maintained in order to - // persist it in the state. 
- Private: req.Private, - } - - res := s.provider.ResourcesMap[req.TypeName] - schemaBlock := s.getResourceSchemaBlock(req.TypeName) - - stateVal, err := msgpack.Unmarshal(req.CurrentState.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - instanceState, err := res.ShimInstanceStateFromValue(stateVal) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - private := make(map[string]interface{}) - if len(req.Private) > 0 { - if err := json.Unmarshal(req.Private, &private); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - } - instanceState.Meta = private - - pmSchemaBlock := s.getProviderMetaSchemaBlock() - if pmSchemaBlock != nil && req.ProviderMeta != nil { - providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.Msgpack, pmSchemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - instanceState.ProviderMeta = providerSchemaVal - } - - newInstanceState, err := res.RefreshWithoutUpgrade(instanceState, s.provider.Meta()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - if newInstanceState == nil || newInstanceState.ID == "" { - // The old provider API used an empty id to signal that the remote - // object appears to have been deleted, but our new protocol expects - // to see a null value (in the cty sense) in that case. 
- newStateMP, err := msgpack.Marshal(cty.NullVal(schemaBlock.ImpliedType()), schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - } - resp.NewState = &proto.DynamicValue{ - Msgpack: newStateMP, - } - return resp, nil - } - - // helper/schema should always copy the ID over, but do it again just to be safe - newInstanceState.Attributes["id"] = newInstanceState.ID - - newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - newStateVal = normalizeNullValues(newStateVal, stateVal, false) - newStateVal = copyTimeoutValues(newStateVal, stateVal) - - newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - resp.NewState = &proto.DynamicValue{ - Msgpack: newStateMP, - } - - return resp, nil -} - -func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.PlanResourceChange_Request) (*proto.PlanResourceChange_Response, error) { - resp := &proto.PlanResourceChange_Response{} - - // This is a signal to Terraform Core that we're doing the best we can to - // shim the legacy type system of the SDK onto the Terraform type system - // but we need it to cut us some slack. This setting should not be taken - // forward to any new SDK implementations, since setting it prevents us - // from catching certain classes of provider bug that can lead to - // confusing downstream errors. 
- resp.LegacyTypeSystem = true - - res := s.provider.ResourcesMap[req.TypeName] - schemaBlock := s.getResourceSchemaBlock(req.TypeName) - - priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - create := priorStateVal.IsNull() - - proposedNewStateVal, err := msgpack.Unmarshal(req.ProposedNewState.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // We don't usually plan destroys, but this can return early in any case. - if proposedNewStateVal.IsNull() { - resp.PlannedState = req.ProposedNewState - resp.PlannedPrivate = req.PriorPrivate - return resp, nil - } - - info := &terraform.InstanceInfo{ - Type: req.TypeName, - } - - priorState, err := res.ShimInstanceStateFromValue(priorStateVal) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - priorPrivate := make(map[string]interface{}) - if len(req.PriorPrivate) > 0 { - if err := json.Unmarshal(req.PriorPrivate, &priorPrivate); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - } - - priorState.Meta = priorPrivate - - pmSchemaBlock := s.getProviderMetaSchemaBlock() - if pmSchemaBlock != nil && req.ProviderMeta != nil { - providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.Msgpack, pmSchemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - priorState.ProviderMeta = providerSchemaVal - } - - // Ensure there are no nulls that will cause helper/schema to panic. 
- if err := validateConfigNulls(proposedNewStateVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // turn the proposed state into a legacy configuration - cfg := terraform.NewResourceConfigShimmed(proposedNewStateVal, schemaBlock) - - diff, err := s.provider.SimpleDiff(info, priorState, cfg) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // if this is a new instance, we need to make sure ID is going to be computed - if create { - if diff == nil { - diff = terraform.NewInstanceDiff() - } - - diff.Attributes["id"] = &terraform.ResourceAttrDiff{ - NewComputed: true, - } - } - - if diff == nil || len(diff.Attributes) == 0 { - // schema.Provider.Diff returns nil if it ends up making a diff with no - // changes, but our new interface wants us to return an actual change - // description that _shows_ there are no changes. This is always the - // prior state, because we force a diff above if this is a new instance. 
- resp.PlannedState = req.PriorState - resp.PlannedPrivate = req.PriorPrivate - return resp, nil - } - - if priorState == nil { - priorState = &terraform.InstanceState{} - } - - // now we need to apply the diff to the prior state, so get the planned state - plannedAttrs, err := diff.Apply(priorState.Attributes, schemaBlock) - - plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - plannedStateVal, err = schemaBlock.CoerceValue(plannedStateVal) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - plannedStateVal = normalizeNullValues(plannedStateVal, proposedNewStateVal, false) - - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - plannedStateVal = copyTimeoutValues(plannedStateVal, proposedNewStateVal) - - // The old SDK code has some imprecisions that cause it to sometimes - // generate differences that the SDK itself does not consider significant - // but Terraform Core would. To avoid producing weird do-nothing diffs - // in that case, we'll check if the provider as produced something we - // think is "equivalent" to the prior state and just return the prior state - // itself if so, thus ensuring that Terraform Core will treat this as - // a no-op. See the docs for ValuesSDKEquivalent for some caveats on its - // accuracy. 
- forceNoChanges := false - if hcl2shim.ValuesSDKEquivalent(priorStateVal, plannedStateVal) { - plannedStateVal = priorStateVal - forceNoChanges = true - } - - // if this was creating the resource, we need to set any remaining computed - // fields - if create { - plannedStateVal = SetUnknowns(plannedStateVal, schemaBlock) - } - - plannedMP, err := msgpack.Marshal(plannedStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.PlannedState = &proto.DynamicValue{ - Msgpack: plannedMP, - } - - // encode any timeouts into the diff Meta - t := &schema.ResourceTimeout{} - if err := t.ConfigDecode(res, cfg); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - if err := t.DiffEncode(diff); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // Now we need to store any NewExtra values, which are where any actual - // StateFunc modified config fields are hidden. - privateMap := diff.Meta - if privateMap == nil { - privateMap = map[string]interface{}{} - } - - newExtra := map[string]interface{}{} - - for k, v := range diff.Attributes { - if v.NewExtra != nil { - newExtra[k] = v.NewExtra - } - } - privateMap[newExtraKey] = newExtra - - // the Meta field gets encoded into PlannedPrivate - plannedPrivate, err := json.Marshal(privateMap) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.PlannedPrivate = plannedPrivate - - // collect the attributes that require instance replacement, and convert - // them to cty.Paths. 
- var requiresNew []string - if !forceNoChanges { - for attr, d := range diff.Attributes { - if d.RequiresNew { - requiresNew = append(requiresNew, attr) - } - } - } - - // If anything requires a new resource already, or the "id" field indicates - // that we will be creating a new resource, then we need to add that to - // RequiresReplace so that core can tell if the instance is being replaced - // even if changes are being suppressed via "ignore_changes". - id := plannedStateVal.GetAttr("id") - if len(requiresNew) > 0 || id.IsNull() || !id.IsKnown() { - requiresNew = append(requiresNew, "id") - } - - requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // convert these to the protocol structures - for _, p := range requiresReplace { - resp.RequiresReplace = append(resp.RequiresReplace, pathToAttributePath(p)) - } - - return resp, nil -} - -func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.ApplyResourceChange_Request) (*proto.ApplyResourceChange_Response, error) { - resp := &proto.ApplyResourceChange_Response{ - // Start with the existing state as a fallback - NewState: req.PriorState, - } - - res := s.provider.ResourcesMap[req.TypeName] - schemaBlock := s.getResourceSchemaBlock(req.TypeName) - - priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - plannedStateVal, err := msgpack.Unmarshal(req.PlannedState.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - info := &terraform.InstanceInfo{ - Type: req.TypeName, - } - - priorState, err := res.ShimInstanceStateFromValue(priorStateVal) - if err != nil { - resp.Diagnostics = 
convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - private := make(map[string]interface{}) - if len(req.PlannedPrivate) > 0 { - if err := json.Unmarshal(req.PlannedPrivate, &private); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - } - - var diff *terraform.InstanceDiff - destroy := false - - // a null state means we are destroying the instance - if plannedStateVal.IsNull() { - destroy = true - diff = &terraform.InstanceDiff{ - Attributes: make(map[string]*terraform.ResourceAttrDiff), - Meta: make(map[string]interface{}), - Destroy: true, - } - } else { - diff, err = schema.DiffFromValues(priorStateVal, plannedStateVal, stripResourceModifiers(res)) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - } - - if diff == nil { - diff = &terraform.InstanceDiff{ - Attributes: make(map[string]*terraform.ResourceAttrDiff), - Meta: make(map[string]interface{}), - } - } - - // add NewExtra Fields that may have been stored in the private data - if newExtra := private[newExtraKey]; newExtra != nil { - for k, v := range newExtra.(map[string]interface{}) { - d := diff.Attributes[k] - - if d == nil { - d = &terraform.ResourceAttrDiff{} - } - - d.NewExtra = v - diff.Attributes[k] = d - } - } - - if private != nil { - diff.Meta = private - } - - for k, d := range diff.Attributes { - // We need to turn off any RequiresNew. There could be attributes - // without changes in here inserted by helper/schema, but if they have - // RequiresNew then the state will be dropped from the ResourceData. 
- d.RequiresNew = false - - // Check that any "removed" attributes that don't actually exist in the - // prior state, or helper/schema will confuse itself - if d.NewRemoved { - if _, ok := priorState.Attributes[k]; !ok { - delete(diff.Attributes, k) - } - } - } - - pmSchemaBlock := s.getProviderMetaSchemaBlock() - if pmSchemaBlock != nil && req.ProviderMeta != nil { - providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.Msgpack, pmSchemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - priorState.ProviderMeta = providerSchemaVal - } - - newInstanceState, err := s.provider.Apply(info, priorState, diff) - // we record the error here, but continue processing any returned state. - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - } - newStateVal := cty.NullVal(schemaBlock.ImpliedType()) - - // Always return a null value for destroy. - // While this is usually indicated by a nil state, check for missing ID or - // attributes in the case of a provider failure. - if destroy || newInstanceState == nil || newInstanceState.Attributes == nil || newInstanceState.ID == "" { - newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.NewState = &proto.DynamicValue{ - Msgpack: newStateMP, - } - return resp, nil - } - - // We keep the null val if we destroyed the resource, otherwise build the - // entire object, even if the new state was nil. 
- newStateVal, err = schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - newStateVal = normalizeNullValues(newStateVal, plannedStateVal, true) - - newStateVal = copyTimeoutValues(newStateVal, plannedStateVal) - - newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.NewState = &proto.DynamicValue{ - Msgpack: newStateMP, - } - - meta, err := json.Marshal(newInstanceState.Meta) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.Private = meta - - // This is a signal to Terraform Core that we're doing the best we can to - // shim the legacy type system of the SDK onto the Terraform type system - // but we need it to cut us some slack. This setting should not be taken - // forward to any new SDK implementations, since setting it prevents us - // from catching certain classes of provider bug that can lead to - // confusing downstream errors. 
- resp.LegacyTypeSystem = true - - return resp, nil -} - -func (s *GRPCProviderServer) ImportResourceState(_ context.Context, req *proto.ImportResourceState_Request) (*proto.ImportResourceState_Response, error) { - resp := &proto.ImportResourceState_Response{} - - info := &terraform.InstanceInfo{ - Type: req.TypeName, - } - - newInstanceStates, err := s.provider.ImportState(info, req.Id) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - for _, is := range newInstanceStates { - // copy the ID again just to be sure it wasn't missed - is.Attributes["id"] = is.ID - - resourceType := is.Ephemeral.Type - if resourceType == "" { - resourceType = req.TypeName - } - - schemaBlock := s.getResourceSchemaBlock(resourceType) - newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // Normalize the value and fill in any missing blocks. 
- newStateVal = objchange.NormalizeObjectFromLegacySDK(newStateVal, schemaBlock) - - newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - meta, err := json.Marshal(is.Meta) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - importedResource := &proto.ImportResourceState_ImportedResource{ - TypeName: resourceType, - State: &proto.DynamicValue{ - Msgpack: newStateMP, - }, - Private: meta, - } - - resp.ImportedResources = append(resp.ImportedResources, importedResource) - } - - return resp, nil -} - -func (s *GRPCProviderServer) ReadDataSource(_ context.Context, req *proto.ReadDataSource_Request) (*proto.ReadDataSource_Response, error) { - resp := &proto.ReadDataSource_Response{} - - schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - info := &terraform.InstanceInfo{ - Type: req.TypeName, - } - - // Ensure there are no nulls that will cause helper/schema to panic. 
- if err := validateConfigNulls(configVal, nil); err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) - - // we need to still build the diff separately with the Read method to match - // the old behavior - diff, err := s.provider.ReadDataDiff(info, config) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - // now we can get the new complete data source - newInstanceState, err := s.provider.ReadDataApply(info, diff) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - newStateVal, err := schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - newStateVal = copyTimeoutValues(newStateVal, configVal) - - newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - resp.State = &proto.DynamicValue{ - Msgpack: newStateMP, - } - return resp, nil -} - -func pathToAttributePath(path cty.Path) *proto.AttributePath { - var steps []*proto.AttributePath_Step - - for _, step := range path { - switch s := step.(type) { - case cty.GetAttrStep: - steps = append(steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_AttributeName{ - AttributeName: s.Name, - }, - }) - case cty.IndexStep: - ty := s.Key.Type() - switch ty { - case cty.Number: - i, _ := s.Key.AsBigFloat().Int64() - steps = append(steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_ElementKeyInt{ - ElementKeyInt: i, - }, - }) - case cty.String: - steps = append(steps, &proto.AttributePath_Step{ - Selector: &proto.AttributePath_Step_ElementKeyString{ - ElementKeyString: s.Key.AsString(), 
- }, - }) - } - } - } - - return &proto.AttributePath{Steps: steps} -} - -// helper/schema throws away timeout values from the config and stores them in -// the Private/Meta fields. we need to copy those values into the planned state -// so that core doesn't see a perpetual diff with the timeout block. -func copyTimeoutValues(to cty.Value, from cty.Value) cty.Value { - // if `to` is null we are planning to remove it altogether. - if to.IsNull() { - return to - } - toAttrs := to.AsValueMap() - // We need to remove the key since the hcl2shims will add a non-null block - // because we can't determine if a single block was null from the flatmapped - // values. This needs to conform to the correct schema for marshaling, so - // change the value to null rather than deleting it from the object map. - timeouts, ok := toAttrs[schema.TimeoutsConfigKey] - if ok { - toAttrs[schema.TimeoutsConfigKey] = cty.NullVal(timeouts.Type()) - } - - // if from is null then there are no timeouts to copy - if from.IsNull() { - return cty.ObjectVal(toAttrs) - } - - fromAttrs := from.AsValueMap() - timeouts, ok = fromAttrs[schema.TimeoutsConfigKey] - - // timeouts shouldn't be unknown, but don't copy possibly invalid values either - if !ok || timeouts.IsNull() || !timeouts.IsWhollyKnown() { - // no timeouts block to copy - return cty.ObjectVal(toAttrs) - } - - toAttrs[schema.TimeoutsConfigKey] = timeouts - - return cty.ObjectVal(toAttrs) -} - -// stripResourceModifiers takes a *schema.Resource and returns a deep copy with all -// StateFuncs and CustomizeDiffs removed. This will be used during apply to -// create a diff from a planned state where the diff modifications have already -// been applied. 
-func stripResourceModifiers(r *schema.Resource) *schema.Resource { - if r == nil { - return nil - } - // start with a shallow copy - newResource := new(schema.Resource) - *newResource = *r - - newResource.CustomizeDiff = nil - newResource.Schema = map[string]*schema.Schema{} - - for k, s := range r.Schema { - newResource.Schema[k] = stripSchema(s) - } - - return newResource -} - -func stripSchema(s *schema.Schema) *schema.Schema { - if s == nil { - return nil - } - // start with a shallow copy - newSchema := new(schema.Schema) - *newSchema = *s - - newSchema.StateFunc = nil - - switch e := newSchema.Elem.(type) { - case *schema.Schema: - newSchema.Elem = stripSchema(e) - case *schema.Resource: - newSchema.Elem = stripResourceModifiers(e) - } - - return newSchema -} - -// Zero values and empty containers may be interchanged by the apply process. -// When there is a discrepency between src and dst value being null or empty, -// prefer the src value. This takes a little more liberty with set types, since -// we can't correlate modified set values. In the case of sets, if the src set -// was wholly known we assume the value was correctly applied and copy that -// entirely to the new value. -// While apply prefers the src value, during plan we prefer dst whenever there -// is an unknown or a set is involved, since the plan can alter the value -// however it sees fit. This however means that a CustomizeDiffFunction may not -// be able to change a null to an empty value or vice versa, but that should be -// very uncommon nor was it reliable before 0.12 either. -func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value { - ty := dst.Type() - if !src.IsNull() && !src.IsKnown() { - // Return src during plan to retain unknown interpolated placeholders, - // which could be lost if we're only updating a resource. If this is a - // read scenario, then there shouldn't be any unknowns at all. 
- if dst.IsNull() && !apply { - return src - } - return dst - } - - // Handle null/empty changes for collections during apply. - // A change between null and empty values prefers src to make sure the state - // is consistent between plan and apply. - if ty.IsCollectionType() && apply { - dstEmpty := !dst.IsNull() && dst.IsKnown() && dst.LengthInt() == 0 - srcEmpty := !src.IsNull() && src.IsKnown() && src.LengthInt() == 0 - - if (src.IsNull() && dstEmpty) || (srcEmpty && dst.IsNull()) { - return src - } - } - - // check the invariants that we need below, to ensure we are working with - // non-null and known values. - if src.IsNull() || !src.IsKnown() || !dst.IsKnown() { - return dst - } - - switch { - case ty.IsMapType(), ty.IsObjectType(): - var dstMap map[string]cty.Value - if !dst.IsNull() { - dstMap = dst.AsValueMap() - } - if dstMap == nil { - dstMap = map[string]cty.Value{} - } - - srcMap := src.AsValueMap() - for key, v := range srcMap { - dstVal, ok := dstMap[key] - if !ok && apply && ty.IsMapType() { - // don't transfer old map values to dst during apply - continue - } - - if dstVal == cty.NilVal { - if !apply && ty.IsMapType() { - // let plan shape this map however it wants - continue - } - dstVal = cty.NullVal(v.Type()) - } - - dstMap[key] = normalizeNullValues(dstVal, v, apply) - } - - // you can't call MapVal/ObjectVal with empty maps, but nothing was - // copied in anyway. If the dst is nil, and the src is known, assume the - // src is correct. - if len(dstMap) == 0 { - if dst.IsNull() && src.IsWhollyKnown() && apply { - return src - } - return dst - } - - if ty.IsMapType() { - // helper/schema will populate an optional+computed map with - // unknowns which we have to fixup here. - // It would be preferable to simply prevent any known value from - // becoming unknown, but concessions have to be made to retain the - // broken legacy behavior when possible. 
- for k, srcVal := range srcMap { - if !srcVal.IsNull() && srcVal.IsKnown() { - dstVal, ok := dstMap[k] - if !ok { - continue - } - - if !dstVal.IsNull() && !dstVal.IsKnown() { - dstMap[k] = srcVal - } - } - } - - return cty.MapVal(dstMap) - } - - return cty.ObjectVal(dstMap) - - case ty.IsSetType(): - // If the original was wholly known, then we expect that is what the - // provider applied. The apply process loses too much information to - // reliably re-create the set. - if src.IsWhollyKnown() && apply { - return src - } - - case ty.IsListType(), ty.IsTupleType(): - // If the dst is null, and the src is known, then we lost an empty value - // so take the original. - if dst.IsNull() { - if src.IsWhollyKnown() && src.LengthInt() == 0 && apply { - return src - } - - // if dst is null and src only contains unknown values, then we lost - // those during a read or plan. - if !apply && !src.IsNull() { - allUnknown := true - for _, v := range src.AsValueSlice() { - if v.IsKnown() { - allUnknown = false - break - } - } - if allUnknown { - return src - } - } - - return dst - } - - // if the lengths are identical, then iterate over each element in succession. - srcLen := src.LengthInt() - dstLen := dst.LengthInt() - if srcLen == dstLen && srcLen > 0 { - srcs := src.AsValueSlice() - dsts := dst.AsValueSlice() - - for i := 0; i < srcLen; i++ { - dsts[i] = normalizeNullValues(dsts[i], srcs[i], apply) - } - - if ty.IsTupleType() { - return cty.TupleVal(dsts) - } - return cty.ListVal(dsts) - } - - case ty == cty.String: - // The legacy SDK should not be able to remove a value during plan or - // apply, however we are only going to overwrite this if the source was - // an empty string, since that is what is often equated with unset and - // lost in the diff process. - if dst.IsNull() && src.AsString() == "" { - return src - } - } - - return dst -} - -// validateConfigNulls checks a config value for unsupported nulls before -// attempting to shim the value. 
While null values can mostly be ignored in the -// configuration, since they're not supported in HCL1, the case where a null -// appears in a list-like attribute (list, set, tuple) will present a nil value -// to helper/schema which can panic. Return an error to the user in this case, -// indicating the attribute with the null value. -func validateConfigNulls(v cty.Value, path cty.Path) []*proto.Diagnostic { - var diags []*proto.Diagnostic - if v.IsNull() || !v.IsKnown() { - return diags - } - - switch { - case v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType(): - it := v.ElementIterator() - for it.Next() { - kv, ev := it.Element() - if ev.IsNull() { - // if this is a set, the kv is also going to be null which - // isn't a valid path element, so we can't append it to the - // diagnostic. - p := path - if !kv.IsNull() { - p = append(p, cty.IndexStep{Key: kv}) - } - - diags = append(diags, &proto.Diagnostic{ - Severity: proto.Diagnostic_ERROR, - Summary: "Null value found in list", - Detail: "Null values are not allowed for this attribute value.", - Attribute: convert.PathToAttributePath(p), - }) - continue - } - - d := validateConfigNulls(ev, append(path, cty.IndexStep{Key: kv})) - diags = convert.AppendProtoDiag(diags, d) - } - - case v.Type().IsMapType() || v.Type().IsObjectType(): - it := v.ElementIterator() - for it.Next() { - kv, ev := it.Element() - var step cty.PathStep - switch { - case v.Type().IsMapType(): - step = cty.IndexStep{Key: kv} - case v.Type().IsObjectType(): - step = cty.GetAttrStep{Name: kv.AsString()} - } - d := validateConfigNulls(ev, append(path, step)) - diags = convert.AppendProtoDiag(diags, d) - } - } - - return diags -} diff --git a/helper/plugin/grpc_provider_test.go b/helper/plugin/grpc_provider_test.go deleted file mode 100644 index 736eb258e..000000000 --- a/helper/plugin/grpc_provider_test.go +++ /dev/null @@ -1,1382 +0,0 @@ -package plugin - -import ( - "context" - "fmt" - "strconv" - "strings" - "testing" - 
"time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/terraform/helper/schema" - proto "github.com/hashicorp/terraform/internal/tfplugin5" - "github.com/hashicorp/terraform/plugin/convert" - "github.com/hashicorp/terraform/terraform" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/msgpack" -) - -// The GRPCProviderServer will directly implement the go protobuf server -var _ proto.ProviderServer = (*GRPCProviderServer)(nil) - -var ( - typeComparer = cmp.Comparer(cty.Type.Equals) - valueComparer = cmp.Comparer(cty.Value.RawEquals) - equateEmpty = cmpopts.EquateEmpty() -) - -func TestUpgradeState_jsonState(t *testing.T) { - r := &schema.Resource{ - SchemaVersion: 2, - Schema: map[string]*schema.Schema{ - "two": { - Type: schema.TypeInt, - Optional: true, - }, - }, - } - - r.StateUpgraders = []schema.StateUpgrader{ - { - Version: 0, - Type: cty.Object(map[string]cty.Type{ - "id": cty.String, - "zero": cty.Number, - }), - Upgrade: func(m map[string]interface{}, meta interface{}) (map[string]interface{}, error) { - _, ok := m["zero"].(float64) - if !ok { - return nil, fmt.Errorf("zero not found in %#v", m) - } - m["one"] = float64(1) - delete(m, "zero") - return m, nil - }, - }, - { - Version: 1, - Type: cty.Object(map[string]cty.Type{ - "id": cty.String, - "one": cty.Number, - }), - Upgrade: func(m map[string]interface{}, meta interface{}) (map[string]interface{}, error) { - _, ok := m["one"].(float64) - if !ok { - return nil, fmt.Errorf("one not found in %#v", m) - } - m["two"] = float64(2) - delete(m, "one") - return m, nil - }, - }, - } - - server := &GRPCProviderServer{ - provider: &schema.Provider{ - ResourcesMap: map[string]*schema.Resource{ - "test": r, - }, - }, - } - - req := &proto.UpgradeResourceState_Request{ - TypeName: "test", - Version: 0, - RawState: &proto.RawState{ - Json: []byte(`{"id":"bar","zero":0}`), - }, - } - - resp, err := server.UpgradeResourceState(nil, req) - if err != 
nil { - t.Fatal(err) - } - - if len(resp.Diagnostics) > 0 { - for _, d := range resp.Diagnostics { - t.Errorf("%#v", d) - } - t.Fatal("error") - } - - val, err := msgpack.Unmarshal(resp.UpgradedState.Msgpack, r.CoreConfigSchema().ImpliedType()) - if err != nil { - t.Fatal(err) - } - - expected := cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("bar"), - "two": cty.NumberIntVal(2), - }) - - if !cmp.Equal(expected, val, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, val, valueComparer, equateEmpty)) - } -} - -func TestUpgradeState_removedAttr(t *testing.T) { - r1 := &schema.Resource{ - Schema: map[string]*schema.Schema{ - "two": { - Type: schema.TypeString, - Optional: true, - }, - }, - } - - r2 := &schema.Resource{ - Schema: map[string]*schema.Schema{ - "multi": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "set": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "required": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - }, - }, - }, - } - - r3 := &schema.Resource{ - Schema: map[string]*schema.Schema{ - "config_mode_attr": { - Type: schema.TypeList, - ConfigMode: schema.SchemaConfigModeAttr, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "foo": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - } - - p := &schema.Provider{ - ResourcesMap: map[string]*schema.Resource{ - "r1": r1, - "r2": r2, - "r3": r3, - }, - } - - server := &GRPCProviderServer{ - provider: p, - } - - for _, tc := range []struct { - name string - raw string - expected cty.Value - }{ - { - name: "r1", - raw: `{"id":"bar","removed":"removed","two":"2"}`, - expected: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("bar"), - "two": cty.StringVal("2"), - }), - }, - { - name: "r2", - raw: `{"id":"bar","multi":[{"set":[{"required":"ok","removed":"removed"}]}]}`, - 
expected: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("bar"), - "multi": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "set": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "required": cty.StringVal("ok"), - }), - }), - }), - }), - }), - }, - { - name: "r3", - raw: `{"id":"bar","config_mode_attr":[{"foo":"ok","removed":"removed"}]}`, - expected: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("bar"), - "config_mode_attr": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("ok"), - }), - }), - }), - }, - } { - t.Run(tc.name, func(t *testing.T) { - req := &proto.UpgradeResourceState_Request{ - TypeName: tc.name, - Version: 0, - RawState: &proto.RawState{ - Json: []byte(tc.raw), - }, - } - resp, err := server.UpgradeResourceState(nil, req) - if err != nil { - t.Fatal(err) - } - - if len(resp.Diagnostics) > 0 { - for _, d := range resp.Diagnostics { - t.Errorf("%#v", d) - } - t.Fatal("error") - } - val, err := msgpack.Unmarshal(resp.UpgradedState.Msgpack, p.ResourcesMap[tc.name].CoreConfigSchema().ImpliedType()) - if err != nil { - t.Fatal(err) - } - if !tc.expected.RawEquals(val) { - t.Fatalf("\nexpected: %#v\ngot: %#v\n", tc.expected, val) - } - }) - } - -} - -func TestUpgradeState_flatmapState(t *testing.T) { - r := &schema.Resource{ - SchemaVersion: 4, - Schema: map[string]*schema.Schema{ - "four": { - Type: schema.TypeInt, - Required: true, - }, - "block": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "attr": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - // this MigrateState will take the state to version 2 - MigrateState: func(v int, is *terraform.InstanceState, _ interface{}) (*terraform.InstanceState, error) { - switch v { - case 0: - _, ok := is.Attributes["zero"] - if !ok { - return nil, fmt.Errorf("zero not found in %#v", is.Attributes) - } - is.Attributes["one"] = "1" - 
delete(is.Attributes, "zero") - fallthrough - case 1: - _, ok := is.Attributes["one"] - if !ok { - return nil, fmt.Errorf("one not found in %#v", is.Attributes) - } - is.Attributes["two"] = "2" - delete(is.Attributes, "one") - default: - return nil, fmt.Errorf("invalid schema version %d", v) - } - return is, nil - }, - } - - r.StateUpgraders = []schema.StateUpgrader{ - { - Version: 2, - Type: cty.Object(map[string]cty.Type{ - "id": cty.String, - "two": cty.Number, - }), - Upgrade: func(m map[string]interface{}, meta interface{}) (map[string]interface{}, error) { - _, ok := m["two"].(float64) - if !ok { - return nil, fmt.Errorf("two not found in %#v", m) - } - m["three"] = float64(3) - delete(m, "two") - return m, nil - }, - }, - { - Version: 3, - Type: cty.Object(map[string]cty.Type{ - "id": cty.String, - "three": cty.Number, - }), - Upgrade: func(m map[string]interface{}, meta interface{}) (map[string]interface{}, error) { - _, ok := m["three"].(float64) - if !ok { - return nil, fmt.Errorf("three not found in %#v", m) - } - m["four"] = float64(4) - delete(m, "three") - return m, nil - }, - }, - } - - server := &GRPCProviderServer{ - provider: &schema.Provider{ - ResourcesMap: map[string]*schema.Resource{ - "test": r, - }, - }, - } - - testReqs := []*proto.UpgradeResourceState_Request{ - { - TypeName: "test", - Version: 0, - RawState: &proto.RawState{ - Flatmap: map[string]string{ - "id": "bar", - "zero": "0", - }, - }, - }, - { - TypeName: "test", - Version: 1, - RawState: &proto.RawState{ - Flatmap: map[string]string{ - "id": "bar", - "one": "1", - }, - }, - }, - // two and up could be stored in flatmap or json states - { - TypeName: "test", - Version: 2, - RawState: &proto.RawState{ - Flatmap: map[string]string{ - "id": "bar", - "two": "2", - }, - }, - }, - { - TypeName: "test", - Version: 2, - RawState: &proto.RawState{ - Json: []byte(`{"id":"bar","two":2}`), - }, - }, - { - TypeName: "test", - Version: 3, - RawState: &proto.RawState{ - Flatmap: 
map[string]string{ - "id": "bar", - "three": "3", - }, - }, - }, - { - TypeName: "test", - Version: 3, - RawState: &proto.RawState{ - Json: []byte(`{"id":"bar","three":3}`), - }, - }, - { - TypeName: "test", - Version: 4, - RawState: &proto.RawState{ - Flatmap: map[string]string{ - "id": "bar", - "four": "4", - }, - }, - }, - { - TypeName: "test", - Version: 4, - RawState: &proto.RawState{ - Json: []byte(`{"id":"bar","four":4}`), - }, - }, - } - - for i, req := range testReqs { - t.Run(fmt.Sprintf("%d-%d", i, req.Version), func(t *testing.T) { - resp, err := server.UpgradeResourceState(nil, req) - if err != nil { - t.Fatal(err) - } - - if len(resp.Diagnostics) > 0 { - for _, d := range resp.Diagnostics { - t.Errorf("%#v", d) - } - t.Fatal("error") - } - - val, err := msgpack.Unmarshal(resp.UpgradedState.Msgpack, r.CoreConfigSchema().ImpliedType()) - if err != nil { - t.Fatal(err) - } - - expected := cty.ObjectVal(map[string]cty.Value{ - "block": cty.ListValEmpty(cty.Object(map[string]cty.Type{"attr": cty.String})), - "id": cty.StringVal("bar"), - "four": cty.NumberIntVal(4), - }) - - if !cmp.Equal(expected, val, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, val, valueComparer, equateEmpty)) - } - }) - } -} - -func TestUpgradeState_flatmapStateMissingMigrateState(t *testing.T) { - r := &schema.Resource{ - SchemaVersion: 1, - Schema: map[string]*schema.Schema{ - "one": { - Type: schema.TypeInt, - Required: true, - }, - }, - } - - server := &GRPCProviderServer{ - provider: &schema.Provider{ - ResourcesMap: map[string]*schema.Resource{ - "test": r, - }, - }, - } - - testReqs := []*proto.UpgradeResourceState_Request{ - { - TypeName: "test", - Version: 0, - RawState: &proto.RawState{ - Flatmap: map[string]string{ - "id": "bar", - "one": "1", - }, - }, - }, - { - TypeName: "test", - Version: 1, - RawState: &proto.RawState{ - Flatmap: map[string]string{ - "id": "bar", - "one": "1", - }, - }, - }, - { - TypeName: "test", - Version: 1, - RawState: 
&proto.RawState{ - Json: []byte(`{"id":"bar","one":1}`), - }, - }, - } - - for i, req := range testReqs { - t.Run(fmt.Sprintf("%d-%d", i, req.Version), func(t *testing.T) { - resp, err := server.UpgradeResourceState(nil, req) - if err != nil { - t.Fatal(err) - } - - if len(resp.Diagnostics) > 0 { - for _, d := range resp.Diagnostics { - t.Errorf("%#v", d) - } - t.Fatal("error") - } - - val, err := msgpack.Unmarshal(resp.UpgradedState.Msgpack, r.CoreConfigSchema().ImpliedType()) - if err != nil { - t.Fatal(err) - } - - expected := cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("bar"), - "one": cty.NumberIntVal(1), - }) - - if !cmp.Equal(expected, val, valueComparer, equateEmpty) { - t.Fatal(cmp.Diff(expected, val, valueComparer, equateEmpty)) - } - }) - } -} - -func TestPlanResourceChange(t *testing.T) { - r := &schema.Resource{ - SchemaVersion: 4, - Schema: map[string]*schema.Schema{ - "foo": { - Type: schema.TypeInt, - Optional: true, - }, - }, - } - - server := &GRPCProviderServer{ - provider: &schema.Provider{ - ResourcesMap: map[string]*schema.Resource{ - "test": r, - }, - }, - } - - schema := r.CoreConfigSchema() - priorState, err := msgpack.Marshal(cty.NullVal(schema.ImpliedType()), schema.ImpliedType()) - if err != nil { - t.Fatal(err) - } - - // A propsed state with only the ID unknown will produce a nil diff, and - // should return the propsed state value. 
- proposedVal, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - })) - if err != nil { - t.Fatal(err) - } - proposedState, err := msgpack.Marshal(proposedVal, schema.ImpliedType()) - if err != nil { - t.Fatal(err) - } - - testReq := &proto.PlanResourceChange_Request{ - TypeName: "test", - PriorState: &proto.DynamicValue{ - Msgpack: priorState, - }, - ProposedNewState: &proto.DynamicValue{ - Msgpack: proposedState, - }, - } - - resp, err := server.PlanResourceChange(context.Background(), testReq) - if err != nil { - t.Fatal(err) - } - - plannedStateVal, err := msgpack.Unmarshal(resp.PlannedState.Msgpack, schema.ImpliedType()) - if err != nil { - t.Fatal(err) - } - - if !cmp.Equal(proposedVal, plannedStateVal, valueComparer) { - t.Fatal(cmp.Diff(proposedVal, plannedStateVal, valueComparer)) - } -} - -func TestApplyResourceChange(t *testing.T) { - r := &schema.Resource{ - SchemaVersion: 4, - Schema: map[string]*schema.Schema{ - "foo": { - Type: schema.TypeInt, - Optional: true, - }, - }, - Create: func(rd *schema.ResourceData, _ interface{}) error { - rd.SetId("bar") - return nil - }, - } - - server := &GRPCProviderServer{ - provider: &schema.Provider{ - ResourcesMap: map[string]*schema.Resource{ - "test": r, - }, - }, - } - - schema := r.CoreConfigSchema() - priorState, err := msgpack.Marshal(cty.NullVal(schema.ImpliedType()), schema.ImpliedType()) - if err != nil { - t.Fatal(err) - } - - // A proposed state with only the ID unknown will produce a nil diff, and - // should return the proposed state value. 
- plannedVal, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - })) - if err != nil { - t.Fatal(err) - } - plannedState, err := msgpack.Marshal(plannedVal, schema.ImpliedType()) - if err != nil { - t.Fatal(err) - } - - testReq := &proto.ApplyResourceChange_Request{ - TypeName: "test", - PriorState: &proto.DynamicValue{ - Msgpack: priorState, - }, - PlannedState: &proto.DynamicValue{ - Msgpack: plannedState, - }, - } - - resp, err := server.ApplyResourceChange(context.Background(), testReq) - if err != nil { - t.Fatal(err) - } - - newStateVal, err := msgpack.Unmarshal(resp.NewState.Msgpack, schema.ImpliedType()) - if err != nil { - t.Fatal(err) - } - - id := newStateVal.GetAttr("id").AsString() - if id != "bar" { - t.Fatalf("incorrect final state: %#v\n", newStateVal) - } -} - -func TestPrepareProviderConfig(t *testing.T) { - for _, tc := range []struct { - Name string - Schema map[string]*schema.Schema - ConfigVal cty.Value - ExpectError string - ExpectConfig cty.Value - }{ - { - Name: "test prepare", - Schema: map[string]*schema.Schema{ - "foo": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - }, - ConfigVal: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("bar"), - }), - ExpectConfig: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("bar"), - }), - }, - { - Name: "test default", - Schema: map[string]*schema.Schema{ - "foo": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "default", - }, - }, - ConfigVal: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.NullVal(cty.String), - }), - ExpectConfig: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("default"), - }), - }, - { - Name: "test defaultfunc", - Schema: map[string]*schema.Schema{ - "foo": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: func() (interface{}, error) { - return "defaultfunc", nil - }, - }, - }, - ConfigVal: cty.ObjectVal(map[string]cty.Value{ - 
"foo": cty.NullVal(cty.String), - }), - ExpectConfig: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("defaultfunc"), - }), - }, - { - Name: "test default required", - Schema: map[string]*schema.Schema{ - "foo": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: func() (interface{}, error) { - return "defaultfunc", nil - }, - }, - }, - ConfigVal: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.NullVal(cty.String), - }), - ExpectConfig: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("defaultfunc"), - }), - }, - { - Name: "test incorrect type", - Schema: map[string]*schema.Schema{ - "foo": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - }, - ConfigVal: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.NumberIntVal(3), - }), - ExpectConfig: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("3"), - }), - }, - { - Name: "test incorrect default type", - Schema: map[string]*schema.Schema{ - "foo": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: true, - }, - }, - ConfigVal: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.NullVal(cty.String), - }), - ExpectConfig: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("true"), - }), - }, - { - Name: "test incorrect default bool type", - Schema: map[string]*schema.Schema{ - "foo": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: "", - }, - }, - ConfigVal: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.NullVal(cty.Bool), - }), - ExpectConfig: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.False, - }), - }, - { - Name: "test deprecated default", - Schema: map[string]*schema.Schema{ - "foo": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "do not use", - Removed: "don't use this", - }, - }, - ConfigVal: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.NullVal(cty.String), - }), - ExpectConfig: cty.ObjectVal(map[string]cty.Value{ - "foo": cty.NullVal(cty.String), - }), - }, - 
} { - t.Run(tc.Name, func(t *testing.T) { - server := &GRPCProviderServer{ - provider: &schema.Provider{ - Schema: tc.Schema, - }, - } - - block := schema.InternalMap(tc.Schema).CoreConfigSchema() - - rawConfig, err := msgpack.Marshal(tc.ConfigVal, block.ImpliedType()) - if err != nil { - t.Fatal(err) - } - - testReq := &proto.PrepareProviderConfig_Request{ - Config: &proto.DynamicValue{ - Msgpack: rawConfig, - }, - } - - resp, err := server.PrepareProviderConfig(nil, testReq) - if err != nil { - t.Fatal(err) - } - - if tc.ExpectError != "" && len(resp.Diagnostics) > 0 { - for _, d := range resp.Diagnostics { - if !strings.Contains(d.Summary, tc.ExpectError) { - t.Fatalf("Unexpected error: %s/%s", d.Summary, d.Detail) - } - } - return - } - - // we should have no errors past this point - for _, d := range resp.Diagnostics { - if d.Severity == proto.Diagnostic_ERROR { - t.Fatal(resp.Diagnostics) - } - } - - val, err := msgpack.Unmarshal(resp.PreparedConfig.Msgpack, block.ImpliedType()) - if err != nil { - t.Fatal(err) - } - - if tc.ExpectConfig.GoString() != val.GoString() { - t.Fatalf("\nexpected: %#v\ngot: %#v", tc.ExpectConfig, val) - } - }) - } -} - -func TestGetSchemaTimeouts(t *testing.T) { - r := &schema.Resource{ - SchemaVersion: 4, - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(time.Second), - Read: schema.DefaultTimeout(2 * time.Second), - Update: schema.DefaultTimeout(3 * time.Second), - Default: schema.DefaultTimeout(10 * time.Second), - }, - Schema: map[string]*schema.Schema{ - "foo": { - Type: schema.TypeInt, - Optional: true, - }, - }, - } - - // verify that the timeouts appear in the schema as defined - block := r.CoreConfigSchema() - timeoutsBlock := block.BlockTypes["timeouts"] - if timeoutsBlock == nil { - t.Fatal("missing timeouts in schema") - } - - if timeoutsBlock.Attributes["create"] == nil { - t.Fatal("missing create timeout in schema") - } - if timeoutsBlock.Attributes["read"] == nil { - t.Fatal("missing read timeout 
in schema") - } - if timeoutsBlock.Attributes["update"] == nil { - t.Fatal("missing update timeout in schema") - } - if d := timeoutsBlock.Attributes["delete"]; d != nil { - t.Fatalf("unexpected delete timeout in schema: %#v", d) - } - if timeoutsBlock.Attributes["default"] == nil { - t.Fatal("missing default timeout in schema") - } -} - -func TestNormalizeNullValues(t *testing.T) { - for i, tc := range []struct { - Src, Dst, Expect cty.Value - Apply bool - }{ - { - // The known set value is copied over the null set value - Src: cty.ObjectVal(map[string]cty.Value{ - "set": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.NullVal(cty.String), - }), - }), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "set": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ - "foo": cty.String, - }))), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "set": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.NullVal(cty.String), - }), - }), - }), - Apply: true, - }, - { - // A zero set value is kept - Src: cty.ObjectVal(map[string]cty.Value{ - "set": cty.SetValEmpty(cty.String), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "set": cty.SetValEmpty(cty.String), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "set": cty.SetValEmpty(cty.String), - }), - }, - { - // The known set value is copied over the null set value - Src: cty.ObjectVal(map[string]cty.Value{ - "set": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.NullVal(cty.String), - }), - }), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "set": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ - "foo": cty.String, - }))), - }), - // If we're only in a plan, we can't compare sets at all - Expect: cty.ObjectVal(map[string]cty.Value{ - "set": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ - "foo": cty.String, - }))), - }), - }, - { - // The empty map is copied over the null map - Src: cty.ObjectVal(map[string]cty.Value{ - 
"map": cty.MapValEmpty(cty.String), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "map": cty.NullVal(cty.Map(cty.String)), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "map": cty.MapValEmpty(cty.String), - }), - Apply: true, - }, - { - // A zero value primitive is copied over a null primitive - Src: cty.ObjectVal(map[string]cty.Value{ - "string": cty.StringVal(""), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "string": cty.NullVal(cty.String), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "string": cty.StringVal(""), - }), - Apply: true, - }, - { - // Plan primitives are kept - Src: cty.ObjectVal(map[string]cty.Value{ - "string": cty.NumberIntVal(0), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "string": cty.NullVal(cty.Number), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "string": cty.NullVal(cty.Number), - }), - }, - { - // Neither plan nor apply should remove empty strings - Src: cty.ObjectVal(map[string]cty.Value{ - "string": cty.StringVal(""), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "string": cty.NullVal(cty.String), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "string": cty.StringVal(""), - }), - }, - { - // Neither plan nor apply should remove empty strings - Src: cty.ObjectVal(map[string]cty.Value{ - "string": cty.StringVal(""), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "string": cty.NullVal(cty.String), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "string": cty.StringVal(""), - }), - Apply: true, - }, - { - // The null map is retained, because the src was unknown - Src: cty.ObjectVal(map[string]cty.Value{ - "map": cty.UnknownVal(cty.Map(cty.String)), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "map": cty.NullVal(cty.Map(cty.String)), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "map": cty.NullVal(cty.Map(cty.String)), - }), - Apply: true, - }, - { - // the nul set is retained, because the src set contains an unknown value - Src: 
cty.ObjectVal(map[string]cty.Value{ - "set": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.UnknownVal(cty.String), - }), - }), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "set": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ - "foo": cty.String, - }))), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "set": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ - "foo": cty.String, - }))), - }), - Apply: true, - }, - { - // Retain don't re-add unexpected planned values in a map - Src: cty.ObjectVal(map[string]cty.Value{ - "map": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("a"), - "b": cty.StringVal(""), - }), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "map": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("a"), - }), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "map": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("a"), - }), - }), - }, - { - // Remove extra values after apply - Src: cty.ObjectVal(map[string]cty.Value{ - "map": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("a"), - "b": cty.StringVal("b"), - }), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "map": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("a"), - }), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "map": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("a"), - }), - }), - Apply: true, - }, - { - Src: cty.ObjectVal(map[string]cty.Value{ - "a": cty.StringVal("a"), - }), - Dst: cty.EmptyObjectVal, - Expect: cty.ObjectVal(map[string]cty.Value{ - "a": cty.NullVal(cty.String), - }), - }, - - // a list in an object in a list, going from null to empty - { - Src: cty.ObjectVal(map[string]cty.Value{ - "network_interface": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "network_ip": cty.UnknownVal(cty.String), - "access_config": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{"public_ptr_domain_name": cty.String, "nat_ip": cty.String}))), - "address": 
cty.NullVal(cty.String), - "name": cty.StringVal("nic0"), - })}), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "network_interface": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "network_ip": cty.StringVal("10.128.0.64"), - "access_config": cty.ListValEmpty(cty.Object(map[string]cty.Type{"public_ptr_domain_name": cty.String, "nat_ip": cty.String})), - "address": cty.StringVal("address"), - "name": cty.StringVal("nic0"), - }), - }), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "network_interface": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "network_ip": cty.StringVal("10.128.0.64"), - "access_config": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{"public_ptr_domain_name": cty.String, "nat_ip": cty.String}))), - "address": cty.StringVal("address"), - "name": cty.StringVal("nic0"), - }), - }), - }), - Apply: true, - }, - - // a list in an object in a list, going from empty to null - { - Src: cty.ObjectVal(map[string]cty.Value{ - "network_interface": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "network_ip": cty.UnknownVal(cty.String), - "access_config": cty.ListValEmpty(cty.Object(map[string]cty.Type{"public_ptr_domain_name": cty.String, "nat_ip": cty.String})), - "address": cty.NullVal(cty.String), - "name": cty.StringVal("nic0"), - })}), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "network_interface": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "network_ip": cty.StringVal("10.128.0.64"), - "access_config": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{"public_ptr_domain_name": cty.String, "nat_ip": cty.String}))), - "address": cty.StringVal("address"), - "name": cty.StringVal("nic0"), - }), - }), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "network_interface": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "network_ip": cty.StringVal("10.128.0.64"), - "access_config": 
cty.ListValEmpty(cty.Object(map[string]cty.Type{"public_ptr_domain_name": cty.String, "nat_ip": cty.String})), - "address": cty.StringVal("address"), - "name": cty.StringVal("nic0"), - }), - }), - }), - Apply: true, - }, - // the empty list should be transferred, but the new unknown should not be overridden - { - Src: cty.ObjectVal(map[string]cty.Value{ - "network_interface": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "network_ip": cty.StringVal("10.128.0.64"), - "access_config": cty.ListValEmpty(cty.Object(map[string]cty.Type{"public_ptr_domain_name": cty.String, "nat_ip": cty.String})), - "address": cty.NullVal(cty.String), - "name": cty.StringVal("nic0"), - })}), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "network_interface": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "network_ip": cty.UnknownVal(cty.String), - "access_config": cty.NullVal(cty.List(cty.Object(map[string]cty.Type{"public_ptr_domain_name": cty.String, "nat_ip": cty.String}))), - "address": cty.StringVal("address"), - "name": cty.StringVal("nic0"), - }), - }), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "network_interface": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "network_ip": cty.UnknownVal(cty.String), - "access_config": cty.ListValEmpty(cty.Object(map[string]cty.Type{"public_ptr_domain_name": cty.String, "nat_ip": cty.String})), - "address": cty.StringVal("address"), - "name": cty.StringVal("nic0"), - }), - }), - }), - }, - { - // fix unknowns added to a map - Src: cty.ObjectVal(map[string]cty.Value{ - "map": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("a"), - "b": cty.StringVal(""), - }), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "map": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("a"), - "b": cty.UnknownVal(cty.String), - }), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "map": cty.MapVal(map[string]cty.Value{ - "a": cty.StringVal("a"), - "b": cty.StringVal(""), - }), - 
}), - }, - { - // fix unknowns lost from a list - Src: cty.ObjectVal(map[string]cty.Value{ - "top": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "values": cty.ListVal([]cty.Value{cty.UnknownVal(cty.String)}), - }), - }), - }), - }), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "top": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "values": cty.NullVal(cty.List(cty.String)), - }), - }), - }), - }), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "top": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "list": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "values": cty.ListVal([]cty.Value{cty.UnknownVal(cty.String)}), - }), - }), - }), - }), - }), - }, - { - Src: cty.ObjectVal(map[string]cty.Value{ - "set": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ - "list": cty.List(cty.String), - }))), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "set": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "list": cty.List(cty.String), - })), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "set": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "list": cty.List(cty.String), - })), - }), - }, - { - Src: cty.ObjectVal(map[string]cty.Value{ - "set": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ - "list": cty.List(cty.String), - }))), - }), - Dst: cty.ObjectVal(map[string]cty.Value{ - "set": cty.SetValEmpty(cty.Object(map[string]cty.Type{ - "list": cty.List(cty.String), - })), - }), - Expect: cty.ObjectVal(map[string]cty.Value{ - "set": cty.NullVal(cty.Set(cty.Object(map[string]cty.Type{ - "list": cty.List(cty.String), - }))), - }), - Apply: true, - }, - } { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - got := normalizeNullValues(tc.Dst, tc.Src, tc.Apply) - if !got.RawEquals(tc.Expect) { - t.Fatalf("\nexpected: %#v\ngot: %#v\n", 
tc.Expect, got) - } - }) - } -} - -func TestValidateNulls(t *testing.T) { - for i, tc := range []struct { - Cfg cty.Value - Err bool - }{ - { - Cfg: cty.ObjectVal(map[string]cty.Value{ - "list": cty.ListVal([]cty.Value{ - cty.StringVal("string"), - cty.NullVal(cty.String), - }), - }), - Err: true, - }, - { - Cfg: cty.ObjectVal(map[string]cty.Value{ - "map": cty.MapVal(map[string]cty.Value{ - "string": cty.StringVal("string"), - "null": cty.NullVal(cty.String), - }), - }), - Err: false, - }, - { - Cfg: cty.ObjectVal(map[string]cty.Value{ - "object": cty.ObjectVal(map[string]cty.Value{ - "list": cty.ListVal([]cty.Value{ - cty.StringVal("string"), - cty.NullVal(cty.String), - }), - }), - }), - Err: true, - }, - { - Cfg: cty.ObjectVal(map[string]cty.Value{ - "object": cty.ObjectVal(map[string]cty.Value{ - "list": cty.ListVal([]cty.Value{ - cty.StringVal("string"), - cty.NullVal(cty.String), - }), - "list2": cty.ListVal([]cty.Value{ - cty.StringVal("string"), - cty.NullVal(cty.String), - }), - }), - }), - Err: true, - }, - { - Cfg: cty.ObjectVal(map[string]cty.Value{ - "object": cty.ObjectVal(map[string]cty.Value{ - "list": cty.SetVal([]cty.Value{ - cty.StringVal("string"), - cty.NullVal(cty.String), - }), - }), - }), - Err: true, - }, - } { - t.Run(strconv.Itoa(i), func(t *testing.T) { - d := validateConfigNulls(tc.Cfg, nil) - diags := convert.ProtoToDiagnostics(d) - switch { - case tc.Err: - if !diags.HasErrors() { - t.Fatal("expected error") - } - default: - if diags.HasErrors() { - t.Fatalf("unexpected error: %q", diags.Err()) - } - } - }) - } -} diff --git a/helper/plugin/grpc_provisioner.go b/helper/plugin/grpc_provisioner.go deleted file mode 100644 index 088e94e4a..000000000 --- a/helper/plugin/grpc_provisioner.go +++ /dev/null @@ -1,201 +0,0 @@ -package plugin - -import ( - "log" - "strings" - "unicode/utf8" - - "github.com/hashicorp/terraform/helper/schema" - proto "github.com/hashicorp/terraform/internal/tfplugin5" - 
"github.com/hashicorp/terraform/plugin/convert" - "github.com/hashicorp/terraform/terraform" - "github.com/zclconf/go-cty/cty" - ctyconvert "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/msgpack" - context "golang.org/x/net/context" -) - -// NewGRPCProvisionerServerShim wraps a terraform.ResourceProvisioner in a -// proto.ProvisionerServer implementation. If the provided provisioner is not a -// *schema.Provisioner, this will return nil, -func NewGRPCProvisionerServerShim(p terraform.ResourceProvisioner) *GRPCProvisionerServer { - sp, ok := p.(*schema.Provisioner) - if !ok { - return nil - } - return &GRPCProvisionerServer{ - provisioner: sp, - } -} - -type GRPCProvisionerServer struct { - provisioner *schema.Provisioner -} - -func (s *GRPCProvisionerServer) GetSchema(_ context.Context, req *proto.GetProvisionerSchema_Request) (*proto.GetProvisionerSchema_Response, error) { - resp := &proto.GetProvisionerSchema_Response{} - - resp.Provisioner = &proto.Schema{ - Block: convert.ConfigSchemaToProto(schema.InternalMap(s.provisioner.Schema).CoreConfigSchema()), - } - - return resp, nil -} - -func (s *GRPCProvisionerServer) ValidateProvisionerConfig(_ context.Context, req *proto.ValidateProvisionerConfig_Request) (*proto.ValidateProvisionerConfig_Response, error) { - resp := &proto.ValidateProvisionerConfig_Response{} - - cfgSchema := schema.InternalMap(s.provisioner.Schema).CoreConfigSchema() - - configVal, err := msgpack.Unmarshal(req.Config.Msgpack, cfgSchema.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil - } - - config := terraform.NewResourceConfigShimmed(configVal, cfgSchema) - - warns, errs := s.provisioner.Validate(config) - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) - - return resp, nil -} - -// stringMapFromValue converts a cty.Value to a map[stirng]string. 
-// This will panic if the val is not a cty.Map(cty.String). -func stringMapFromValue(val cty.Value) map[string]string { - m := map[string]string{} - if val.IsNull() || !val.IsKnown() { - return m - } - - for it := val.ElementIterator(); it.Next(); { - ak, av := it.Element() - name := ak.AsString() - - if !av.IsKnown() || av.IsNull() { - continue - } - - av, _ = ctyconvert.Convert(av, cty.String) - m[name] = av.AsString() - } - - return m -} - -// uiOutput implements the terraform.UIOutput interface to adapt the grpc -// stream to the legacy Provisioner.Apply method. -type uiOutput struct { - srv proto.Provisioner_ProvisionResourceServer -} - -func (o uiOutput) Output(s string) { - err := o.srv.Send(&proto.ProvisionResource_Response{ - Output: toValidUTF8(s, string(utf8.RuneError)), - }) - if err != nil { - log.Printf("[ERROR] %s", err) - } -} - -func (s *GRPCProvisionerServer) ProvisionResource(req *proto.ProvisionResource_Request, srv proto.Provisioner_ProvisionResourceServer) error { - // We send back a diagnostics over the stream if there was a - // provisioner-side problem. 
- srvResp := &proto.ProvisionResource_Response{} - - cfgSchema := schema.InternalMap(s.provisioner.Schema).CoreConfigSchema() - cfgVal, err := msgpack.Unmarshal(req.Config.Msgpack, cfgSchema.ImpliedType()) - if err != nil { - srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) - srv.Send(srvResp) - return nil - } - resourceConfig := terraform.NewResourceConfigShimmed(cfgVal, cfgSchema) - - connVal, err := msgpack.Unmarshal(req.Connection.Msgpack, cty.Map(cty.String)) - if err != nil { - srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) - srv.Send(srvResp) - return nil - } - - conn := stringMapFromValue(connVal) - - instanceState := &terraform.InstanceState{ - Ephemeral: terraform.EphemeralState{ - ConnInfo: conn, - }, - Meta: make(map[string]interface{}), - } - - err = s.provisioner.Apply(uiOutput{srv}, instanceState, resourceConfig) - if err != nil { - srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) - srv.Send(srvResp) - } - return nil -} - -func (s *GRPCProvisionerServer) Stop(_ context.Context, req *proto.Stop_Request) (*proto.Stop_Response, error) { - resp := &proto.Stop_Response{} - - err := s.provisioner.Stop() - if err != nil { - resp.Error = err.Error() - } - - return resp, nil -} - -// FIXME: backported from go1.13 strings package, remove once terraform is -// using go >= 1.13 -// ToValidUTF8 returns a copy of the string s with each run of invalid UTF-8 byte sequences -// replaced by the replacement string, which may be empty. 
-func toValidUTF8(s, replacement string) string { - var b strings.Builder - - for i, c := range s { - if c != utf8.RuneError { - continue - } - - _, wid := utf8.DecodeRuneInString(s[i:]) - if wid == 1 { - b.Grow(len(s) + len(replacement)) - b.WriteString(s[:i]) - s = s[i:] - break - } - } - - // Fast path for unchanged input - if b.Cap() == 0 { // didn't call b.Grow above - return s - } - - invalid := false // previous byte was from an invalid UTF-8 sequence - for i := 0; i < len(s); { - c := s[i] - if c < utf8.RuneSelf { - i++ - invalid = false - b.WriteByte(c) - continue - } - _, wid := utf8.DecodeRuneInString(s[i:]) - if wid == 1 { - i++ - if !invalid { - invalid = true - b.WriteString(replacement) - } - continue - } - invalid = false - b.WriteString(s[i : i+wid]) - i += wid - } - - return b.String() -} diff --git a/helper/plugin/grpc_provisioner_test.go b/helper/plugin/grpc_provisioner_test.go deleted file mode 100644 index 9b38daf4a..000000000 --- a/helper/plugin/grpc_provisioner_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package plugin - -import ( - "testing" - "unicode/utf8" - - "github.com/golang/mock/gomock" - "github.com/hashicorp/terraform/helper/schema" - proto "github.com/hashicorp/terraform/internal/tfplugin5" - mockproto "github.com/hashicorp/terraform/plugin/mock_proto" - "github.com/hashicorp/terraform/terraform" - context "golang.org/x/net/context" -) - -var _ proto.ProvisionerServer = (*GRPCProvisionerServer)(nil) - -type validUTF8Matcher string - -func (m validUTF8Matcher) Matches(x interface{}) bool { - resp := x.(*proto.ProvisionResource_Response) - return utf8.Valid([]byte(resp.Output)) -} - -func (m validUTF8Matcher) String() string { - return string(m) -} - -func mockProvisionerServer(t *testing.T, c *gomock.Controller) *mockproto.MockProvisioner_ProvisionResourceServer { - server := mockproto.NewMockProvisioner_ProvisionResourceServer(c) - - server.EXPECT().Send( - validUTF8Matcher("check for valid utf8"), - ).Return(nil) - - return server -} 
- -// ensure that a provsioner cannot return invalid utf8 which isn't allowed in -// the grpc protocol. -func TestProvisionerInvalidUTF8(t *testing.T) { - p := &schema.Provisioner{ - ConnSchema: map[string]*schema.Schema{ - "foo": { - Type: schema.TypeString, - Optional: true, - }, - }, - - Schema: map[string]*schema.Schema{ - "foo": { - Type: schema.TypeInt, - Optional: true, - }, - }, - - ApplyFunc: func(ctx context.Context) error { - out := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) - out.Output("invalid \xc3\x28\n") - return nil - }, - } - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - srv := mockProvisionerServer(t, ctrl) - cfg := &proto.DynamicValue{ - Msgpack: []byte("\x81\xa3foo\x01"), - } - conn := &proto.DynamicValue{ - Msgpack: []byte("\x81\xa3foo\xa4host"), - } - provisionerServer := NewGRPCProvisionerServerShim(p) - req := &proto.ProvisionResource_Request{ - Config: cfg, - Connection: conn, - } - - if err := provisionerServer.ProvisionResource(req, srv); err != nil { - t.Fatal(err) - } -} diff --git a/helper/plugin/unknown.go b/helper/plugin/unknown.go deleted file mode 100644 index 64a6784e8..000000000 --- a/helper/plugin/unknown.go +++ /dev/null @@ -1,131 +0,0 @@ -package plugin - -import ( - "fmt" - - "github.com/hashicorp/terraform/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -// SetUnknowns takes a cty.Value, and compares it to the schema setting any null -// values which are computed to unknown. -func SetUnknowns(val cty.Value, schema *configschema.Block) cty.Value { - if !val.IsKnown() { - return val - } - - // If the object was null, we still need to handle the top level attributes - // which might be computed, but we don't need to expand the blocks. 
- if val.IsNull() { - objMap := map[string]cty.Value{} - allNull := true - for name, attr := range schema.Attributes { - switch { - case attr.Computed: - objMap[name] = cty.UnknownVal(attr.Type) - allNull = false - default: - objMap[name] = cty.NullVal(attr.Type) - } - } - - // If this object has no unknown attributes, then we can leave it null. - if allNull { - return val - } - - return cty.ObjectVal(objMap) - } - - valMap := val.AsValueMap() - newVals := make(map[string]cty.Value) - - for name, attr := range schema.Attributes { - v := valMap[name] - - if attr.Computed && v.IsNull() { - newVals[name] = cty.UnknownVal(attr.Type) - continue - } - - newVals[name] = v - } - - for name, blockS := range schema.BlockTypes { - blockVal := valMap[name] - if blockVal.IsNull() || !blockVal.IsKnown() { - newVals[name] = blockVal - continue - } - - blockValType := blockVal.Type() - blockElementType := blockS.Block.ImpliedType() - - // This switches on the value type here, so we can correctly switch - // between Tuples/Lists and Maps/Objects. 
- switch { - case blockS.Nesting == configschema.NestingSingle || blockS.Nesting == configschema.NestingGroup: - // NestingSingle is the only exception here, where we treat the - // block directly as an object - newVals[name] = SetUnknowns(blockVal, &blockS.Block) - - case blockValType.IsSetType(), blockValType.IsListType(), blockValType.IsTupleType(): - listVals := blockVal.AsValueSlice() - newListVals := make([]cty.Value, 0, len(listVals)) - - for _, v := range listVals { - newListVals = append(newListVals, SetUnknowns(v, &blockS.Block)) - } - - switch { - case blockValType.IsSetType(): - switch len(newListVals) { - case 0: - newVals[name] = cty.SetValEmpty(blockElementType) - default: - newVals[name] = cty.SetVal(newListVals) - } - case blockValType.IsListType(): - switch len(newListVals) { - case 0: - newVals[name] = cty.ListValEmpty(blockElementType) - default: - newVals[name] = cty.ListVal(newListVals) - } - case blockValType.IsTupleType(): - newVals[name] = cty.TupleVal(newListVals) - } - - case blockValType.IsMapType(), blockValType.IsObjectType(): - mapVals := blockVal.AsValueMap() - newMapVals := make(map[string]cty.Value) - - for k, v := range mapVals { - newMapVals[k] = SetUnknowns(v, &blockS.Block) - } - - switch { - case blockValType.IsMapType(): - switch len(newMapVals) { - case 0: - newVals[name] = cty.MapValEmpty(blockElementType) - default: - newVals[name] = cty.MapVal(newMapVals) - } - case blockValType.IsObjectType(): - if len(newMapVals) == 0 { - // We need to populate empty values to make a valid object. 
- for attr, ty := range blockElementType.AttributeTypes() { - newMapVals[attr] = cty.NullVal(ty) - } - } - newVals[name] = cty.ObjectVal(newMapVals) - } - - default: - panic(fmt.Sprintf("failed to set unknown values for nested block %q:%#v", name, blockValType)) - } - } - - return cty.ObjectVal(newVals) -} diff --git a/helper/plugin/unknown_test.go b/helper/plugin/unknown_test.go deleted file mode 100644 index 4214b1849..000000000 --- a/helper/plugin/unknown_test.go +++ /dev/null @@ -1,483 +0,0 @@ -package plugin - -import ( - "testing" - - "github.com/hashicorp/terraform/configs/configschema" - "github.com/zclconf/go-cty/cty" -) - -func TestSetUnknowns(t *testing.T) { - for n, tc := range map[string]struct { - Schema *configschema.Block - Val cty.Value - Expected cty.Value - }{ - "empty": { - &configschema.Block{}, - cty.EmptyObjectVal, - cty.EmptyObjectVal, - }, - "no prior": { - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Optional: true, - }, - "bar": { - Type: cty.String, - Computed: true, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "baz": { - Nesting: configschema.NestingSingle, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "boz": { - Type: cty.String, - Optional: true, - Computed: true, - }, - "biz": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - }, - }, - }, - cty.NullVal(cty.Object(map[string]cty.Type{ - "foo": cty.String, - "bar": cty.String, - "baz": cty.Object(map[string]cty.Type{ - "boz": cty.String, - "biz": cty.String, - }), - })), - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.NullVal(cty.String), - "bar": cty.UnknownVal(cty.String), - }), - }, - "null stays null": { - // if the object has no computed attributes, it should stay null - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": &configschema.Attribute{ - Type: cty.String, - }, - }, - BlockTypes: 
map[string]*configschema.NestedBlock{ - "baz": { - Nesting: configschema.NestingSet, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "boz": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - }, - }, - }, - cty.NullVal(cty.Object(map[string]cty.Type{ - "foo": cty.String, - "baz": cty.Set(cty.Object(map[string]cty.Type{ - "boz": cty.String, - })), - })), - cty.NullVal(cty.Object(map[string]cty.Type{ - "foo": cty.String, - "baz": cty.Set(cty.Object(map[string]cty.Type{ - "boz": cty.String, - })), - })), - }, - "no prior with set": { - // the set value should remain null - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": &configschema.Attribute{ - Type: cty.String, - Computed: true, - }, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "baz": { - Nesting: configschema.NestingSet, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "boz": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - }, - }, - }, - cty.NullVal(cty.Object(map[string]cty.Type{ - "foo": cty.String, - "baz": cty.Set(cty.Object(map[string]cty.Type{ - "boz": cty.String, - })), - })), - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.UnknownVal(cty.String), - }), - }, - "prior attributes": { - &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": { - Type: cty.String, - Optional: true, - }, - "bar": { - Type: cty.String, - Computed: true, - }, - "baz": { - Type: cty.String, - Optional: true, - Computed: true, - }, - "boz": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("bonjour"), - "bar": cty.StringVal("petit dejeuner"), - "baz": cty.StringVal("grande dejeuner"), - }), - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.StringVal("bonjour"), - "bar": cty.StringVal("petit dejeuner"), - "baz": cty.StringVal("grande dejeuner"), - "boz": 
cty.UnknownVal(cty.String), - }), - }, - "prior nested single": { - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "foo": { - Nesting: configschema.NestingSingle, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": { - Type: cty.String, - Optional: true, - Computed: true, - }, - "baz": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - }, - }, - }, - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("beep"), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("beep"), - "baz": cty.UnknownVal(cty.String), - }), - }), - }, - "prior nested list": { - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "foo": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": { - Type: cty.String, - Optional: true, - Computed: true, - }, - "baz": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - }, - }, - }, - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("bap"), - }), - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("blep"), - }), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.ListVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("bap"), - "baz": cty.UnknownVal(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("blep"), - "baz": cty.UnknownVal(cty.String), - }), - }), - }), - }, - "prior nested map": { - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "foo": { - Nesting: configschema.NestingMap, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": { - Type: cty.String, - Optional: true, - Computed: true, - }, - "baz": { - Type: 
cty.String, - Optional: true, - Computed: true, - }, - }, - }, - }, - }, - }, - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "bar": cty.NullVal(cty.String), - "baz": cty.StringVal("boop"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("blep"), - "baz": cty.NullVal(cty.String), - }), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.MapVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "bar": cty.UnknownVal(cty.String), - "baz": cty.StringVal("boop"), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("blep"), - "baz": cty.UnknownVal(cty.String), - }), - }), - }), - }, - "prior nested set": { - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "foo": { - Nesting: configschema.NestingSet, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": { - Type: cty.String, - Optional: true, - }, - "baz": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - }, - }, - }, - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("blep"), - "baz": cty.NullVal(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("boop"), - "baz": cty.NullVal(cty.String), - }), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("blep"), - "baz": cty.UnknownVal(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("boop"), - "baz": cty.UnknownVal(cty.String), - }), - }), - }), - }, - "sets differing only by unknown": { - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "foo": { - Nesting: configschema.NestingSet, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": { - Type: cty.String, - 
Optional: true, - }, - "baz": { - Type: cty.String, - Optional: true, - Computed: true, - }, - }, - }, - }, - }, - }, - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("boop"), - "baz": cty.NullVal(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("boop"), - "baz": cty.UnknownVal(cty.String), - }), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.SetVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("boop"), - "baz": cty.UnknownVal(cty.String), - }), - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("boop"), - "baz": cty.UnknownVal(cty.String), - }), - }), - }), - }, - "prior nested list with dynamic": { - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "foo": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": { - Type: cty.String, - Optional: true, - Computed: true, - }, - "baz": { - Type: cty.DynamicPseudoType, - Optional: true, - Computed: true, - }, - }, - }, - }, - }, - }, - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.TupleVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.NullVal(cty.String), - "baz": cty.NumberIntVal(8), - }), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.TupleVal([]cty.Value{ - cty.ObjectVal(map[string]cty.Value{ - "bar": cty.UnknownVal(cty.String), - "baz": cty.NumberIntVal(8), - }), - }), - }), - }, - "prior nested map with dynamic": { - &configschema.Block{ - BlockTypes: map[string]*configschema.NestedBlock{ - "foo": { - Nesting: configschema.NestingMap, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "bar": { - Type: cty.String, - Optional: true, - Computed: true, - }, - "baz": { - Type: cty.DynamicPseudoType, - Optional: true, - Computed: true, - }, - }, - }, - }, - }, - }, - 
cty.ObjectVal(map[string]cty.Value{ - "foo": cty.ObjectVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("beep"), - "baz": cty.NullVal(cty.DynamicPseudoType), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("boop"), - "baz": cty.NumberIntVal(8), - }), - }), - }), - cty.ObjectVal(map[string]cty.Value{ - "foo": cty.ObjectVal(map[string]cty.Value{ - "a": cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("beep"), - "baz": cty.UnknownVal(cty.DynamicPseudoType), - }), - "b": cty.ObjectVal(map[string]cty.Value{ - "bar": cty.StringVal("boop"), - "baz": cty.NumberIntVal(8), - }), - }), - }), - }, - } { - t.Run(n, func(t *testing.T) { - got := SetUnknowns(tc.Val, tc.Schema) - if !got.RawEquals(tc.Expected) { - t.Fatalf("\nexpected: %#v\ngot: %#v\n", tc.Expected, got) - } - }) - } -} diff --git a/helper/resource/error.go b/helper/resource/error.go deleted file mode 100644 index 7ee21614b..000000000 --- a/helper/resource/error.go +++ /dev/null @@ -1,79 +0,0 @@ -package resource - -import ( - "fmt" - "strings" - "time" -) - -type NotFoundError struct { - LastError error - LastRequest interface{} - LastResponse interface{} - Message string - Retries int -} - -func (e *NotFoundError) Error() string { - if e.Message != "" { - return e.Message - } - - if e.Retries > 0 { - return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries) - } - - return "couldn't find resource" -} - -// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending -type UnexpectedStateError struct { - LastError error - State string - ExpectedState []string -} - -func (e *UnexpectedStateError) Error() string { - return fmt.Sprintf( - "unexpected state '%s', wanted target '%s'. 
last error: %s", - e.State, - strings.Join(e.ExpectedState, ", "), - e.LastError, - ) -} - -// TimeoutError is returned when WaitForState times out -type TimeoutError struct { - LastError error - LastState string - Timeout time.Duration - ExpectedState []string -} - -func (e *TimeoutError) Error() string { - expectedState := "resource to be gone" - if len(e.ExpectedState) > 0 { - expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", ")) - } - - extraInfo := make([]string, 0) - if e.LastState != "" { - extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState)) - } - if e.Timeout > 0 { - extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String())) - } - - suffix := "" - if len(extraInfo) > 0 { - suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", ")) - } - - if e.LastError != nil { - return fmt.Sprintf("timeout while waiting for %s%s: %s", - expectedState, suffix, e.LastError) - } - - return fmt.Sprintf("timeout while waiting for %s%s", - expectedState, suffix) -} diff --git a/helper/resource/grpc_test_provider.go b/helper/resource/grpc_test_provider.go deleted file mode 100644 index 0742e993b..000000000 --- a/helper/resource/grpc_test_provider.go +++ /dev/null @@ -1,43 +0,0 @@ -package resource - -import ( - "context" - "net" - "time" - - "github.com/hashicorp/terraform/helper/plugin" - proto "github.com/hashicorp/terraform/internal/tfplugin5" - tfplugin "github.com/hashicorp/terraform/plugin" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/grpc" - "google.golang.org/grpc/test/bufconn" -) - -// GRPCTestProvider takes a legacy ResourceProvider, wraps it in the new GRPC -// shim and starts it in a grpc server using an inmem connection. It returns a -// GRPCClient for this new server to test the shimmed resource provider. 
-func GRPCTestProvider(rp terraform.ResourceProvider) providers.Interface { - listener := bufconn.Listen(256 * 1024) - grpcServer := grpc.NewServer() - - p := plugin.NewGRPCProviderServerShim(rp) - proto.RegisterProviderServer(grpcServer, p) - - go grpcServer.Serve(listener) - - conn, err := grpc.Dial("", grpc.WithDialer(func(string, time.Duration) (net.Conn, error) { - return listener.Dial() - }), grpc.WithInsecure()) - if err != nil { - panic(err) - } - - var pp tfplugin.GRPCProviderPlugin - client, _ := pp.GRPCClient(context.Background(), nil, conn) - - grpcClient := client.(*tfplugin.GRPCProvider) - grpcClient.TestServer = grpcServer - - return grpcClient -} diff --git a/helper/resource/id.go b/helper/resource/id.go deleted file mode 100644 index 44949550e..000000000 --- a/helper/resource/id.go +++ /dev/null @@ -1,45 +0,0 @@ -package resource - -import ( - "fmt" - "strings" - "sync" - "time" -) - -const UniqueIdPrefix = `terraform-` - -// idCounter is a monotonic counter for generating ordered unique ids. -var idMutex sync.Mutex -var idCounter uint32 - -// Helper for a resource to generate a unique identifier w/ default prefix -func UniqueId() string { - return PrefixedUniqueId(UniqueIdPrefix) -} - -// UniqueIDSuffixLength is the string length of the suffix generated by -// PrefixedUniqueId. This can be used by length validation functions to -// ensure prefixes are the correct length for the target field. -const UniqueIDSuffixLength = 26 - -// Helper for a resource to generate a unique identifier w/ given prefix -// -// After the prefix, the ID consists of an incrementing 26 digit value (to match -// previous timestamp output). 
After the prefix, the ID consists of a timestamp -// and an incrementing 8 hex digit value The timestamp means that multiple IDs -// created with the same prefix will sort in the order of their creation, even -// across multiple terraform executions, as long as the clock is not turned back -// between calls, and as long as any given terraform execution generates fewer -// than 4 billion IDs. -func PrefixedUniqueId(prefix string) string { - // Be precise to 4 digits of fractional seconds, but remove the dot before the - // fractional seconds. - timestamp := strings.Replace( - time.Now().UTC().Format("20060102150405.0000"), ".", "", 1) - - idMutex.Lock() - defer idMutex.Unlock() - idCounter++ - return fmt.Sprintf("%s%s%08x", prefix, timestamp, idCounter) -} diff --git a/helper/resource/id_test.go b/helper/resource/id_test.go deleted file mode 100644 index f1560dab1..000000000 --- a/helper/resource/id_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package resource - -import ( - "regexp" - "strings" - "testing" - "time" -) - -var allDigits = regexp.MustCompile(`^\d+$`) -var allHex = regexp.MustCompile(`^[a-f0-9]+$`) - -func TestUniqueId(t *testing.T) { - split := func(rest string) (timestamp, increment string) { - return rest[:18], rest[18:] - } - - iterations := 10000 - ids := make(map[string]struct{}) - var id, lastId string - for i := 0; i < iterations; i++ { - id = UniqueId() - - if _, ok := ids[id]; ok { - t.Fatalf("Got duplicated id! %s", id) - } - - if !strings.HasPrefix(id, UniqueIdPrefix) { - t.Fatalf("Unique ID didn't have terraform- prefix! %s", id) - } - - rest := strings.TrimPrefix(id, UniqueIdPrefix) - - if len(rest) != UniqueIDSuffixLength { - t.Fatalf("PrefixedUniqueId is out of sync with UniqueIDSuffixLength, post-prefix part has wrong length! %s", rest) - } - - timestamp, increment := split(rest) - - if !allDigits.MatchString(timestamp) { - t.Fatalf("Timestamp not all digits! 
%s", timestamp) - } - - if !allHex.MatchString(increment) { - t.Fatalf("Increment part not all hex! %s", increment) - } - - if lastId != "" && lastId >= id { - t.Fatalf("IDs not ordered! %s vs %s", lastId, id) - } - - ids[id] = struct{}{} - lastId = id - } - - id1 := UniqueId() - time.Sleep(time.Millisecond) - id2 := UniqueId() - timestamp1, _ := split(strings.TrimPrefix(id1, UniqueIdPrefix)) - timestamp2, _ := split(strings.TrimPrefix(id2, UniqueIdPrefix)) - - if timestamp1 == timestamp2 { - t.Fatalf("Timestamp part should update at least once a millisecond %s %s", - id1, id2) - } -} diff --git a/helper/resource/state.go b/helper/resource/state.go deleted file mode 100644 index 88a839664..000000000 --- a/helper/resource/state.go +++ /dev/null @@ -1,259 +0,0 @@ -package resource - -import ( - "log" - "time" -) - -var refreshGracePeriod = 30 * time.Second - -// StateRefreshFunc is a function type used for StateChangeConf that is -// responsible for refreshing the item being watched for a state change. -// -// It returns three results. `result` is any object that will be returned -// as the final object after waiting for state change. This allows you to -// return the final updated object, for example an EC2 instance after refreshing -// it. -// -// `state` is the latest state of that object. And `err` is any error that -// may have happened while refreshing the state. -type StateRefreshFunc func() (result interface{}, state string, err error) - -// StateChangeConf is the configuration struct used for `WaitForState`. 
-type StateChangeConf struct { - Delay time.Duration // Wait this time before starting checks - Pending []string // States that are "allowed" and will continue trying - Refresh StateRefreshFunc // Refreshes the current state - Target []string // Target state - Timeout time.Duration // The amount of time to wait before timeout - MinTimeout time.Duration // Smallest time to wait before refreshes - PollInterval time.Duration // Override MinTimeout/backoff and only poll this often - NotFoundChecks int // Number of times to allow not found - - // This is to work around inconsistent APIs - ContinuousTargetOccurence int // Number of times the Target state has to occur continuously -} - -// WaitForState watches an object and waits for it to achieve the state -// specified in the configuration using the specified Refresh() func, -// waiting the number of seconds specified in the timeout configuration. -// -// If the Refresh function returns an error, exit immediately with that error. -// -// If the Refresh function returns a state other than the Target state or one -// listed in Pending, return immediately with an error. -// -// If the Timeout is exceeded before reaching the Target state, return an -// error. -// -// Otherwise, the result is the result of the first call to the Refresh function to -// reach the target state. -func (conf *StateChangeConf) WaitForState() (interface{}, error) { - log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) - - notfoundTick := 0 - targetOccurence := 0 - - // Set a default for times to check for not found - if conf.NotFoundChecks == 0 { - conf.NotFoundChecks = 20 - } - - if conf.ContinuousTargetOccurence == 0 { - conf.ContinuousTargetOccurence = 1 - } - - type Result struct { - Result interface{} - State string - Error error - Done bool - } - - // Read every result from the refresh loop, waiting for a positive result.Done. 
- resCh := make(chan Result, 1) - // cancellation channel for the refresh loop - cancelCh := make(chan struct{}) - - result := Result{} - - go func() { - defer close(resCh) - - time.Sleep(conf.Delay) - - // start with 0 delay for the first loop - var wait time.Duration - - for { - // store the last result - resCh <- result - - // wait and watch for cancellation - select { - case <-cancelCh: - return - case <-time.After(wait): - // first round had no wait - if wait == 0 { - wait = 100 * time.Millisecond - } - } - - res, currentState, err := conf.Refresh() - result = Result{ - Result: res, - State: currentState, - Error: err, - } - - if err != nil { - resCh <- result - return - } - - // If we're waiting for the absence of a thing, then return - if res == nil && len(conf.Target) == 0 { - targetOccurence++ - if conf.ContinuousTargetOccurence == targetOccurence { - result.Done = true - resCh <- result - return - } - continue - } - - if res == nil { - // If we didn't find the resource, check if we have been - // not finding it for awhile, and if so, report an error. 
- notfoundTick++ - if notfoundTick > conf.NotFoundChecks { - result.Error = &NotFoundError{ - LastError: err, - Retries: notfoundTick, - } - resCh <- result - return - } - } else { - // Reset the counter for when a resource isn't found - notfoundTick = 0 - found := false - - for _, allowed := range conf.Target { - if currentState == allowed { - found = true - targetOccurence++ - if conf.ContinuousTargetOccurence == targetOccurence { - result.Done = true - resCh <- result - return - } - continue - } - } - - for _, allowed := range conf.Pending { - if currentState == allowed { - found = true - targetOccurence = 0 - break - } - } - - if !found && len(conf.Pending) > 0 { - result.Error = &UnexpectedStateError{ - LastError: err, - State: result.State, - ExpectedState: conf.Target, - } - resCh <- result - return - } - } - - // Wait between refreshes using exponential backoff, except when - // waiting for the target state to reoccur. - if targetOccurence == 0 { - wait *= 2 - } - - // If a poll interval has been specified, choose that interval. - // Otherwise bound the default value. 
- if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second { - wait = conf.PollInterval - } else { - if wait < conf.MinTimeout { - wait = conf.MinTimeout - } else if wait > 10*time.Second { - wait = 10 * time.Second - } - } - - log.Printf("[TRACE] Waiting %s before next try", wait) - } - }() - - // store the last value result from the refresh loop - lastResult := Result{} - - timeout := time.After(conf.Timeout) - for { - select { - case r, ok := <-resCh: - // channel closed, so return the last result - if !ok { - return lastResult.Result, lastResult.Error - } - - // we reached the intended state - if r.Done { - return r.Result, r.Error - } - - // still waiting, store the last result - lastResult = r - - case <-timeout: - log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout) - log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod) - - // cancel the goroutine and start our grace period timer - close(cancelCh) - timeout := time.After(refreshGracePeriod) - - // we need a for loop and a label to break on, because we may have - // an extra response value to read, but still want to wait for the - // channel to close. 
- forSelect: - for { - select { - case r, ok := <-resCh: - if r.Done { - // the last refresh loop reached the desired state - return r.Result, r.Error - } - - if !ok { - // the goroutine returned - break forSelect - } - - // target state not reached, save the result for the - // TimeoutError and wait for the channel to close - lastResult = r - case <-timeout: - log.Println("[ERROR] WaitForState exceeded refresh grace period") - break forSelect - } - } - - return nil, &TimeoutError{ - LastError: lastResult.Error, - LastState: lastResult.State, - Timeout: conf.Timeout, - ExpectedState: conf.Target, - } - } - } -} diff --git a/helper/resource/state_shim.go b/helper/resource/state_shim.go deleted file mode 100644 index aa2231b28..000000000 --- a/helper/resource/state_shim.go +++ /dev/null @@ -1,218 +0,0 @@ -package resource - -import ( - "encoding/json" - "fmt" - - "github.com/hashicorp/terraform/addrs" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/helper/schema" - - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/terraform" -) - -// shimState takes a new *states.State and reverts it to a legacy state for the provider ACC tests -func shimNewState(newState *states.State, providers map[string]terraform.ResourceProvider) (*terraform.State, error) { - state := terraform.NewState() - - // in the odd case of a nil state, let the helper packages handle it - if newState == nil { - return nil, nil - } - - for _, newMod := range newState.Modules { - mod := state.AddModule(newMod.Addr) - - for name, out := range newMod.OutputValues { - outputType := "" - val := hcl2shim.ConfigValueFromHCL2(out.Value) - ty := out.Value.Type() - switch { - case ty == cty.String: - outputType = "string" - case ty.IsTupleType() || ty.IsListType(): - outputType = "list" - case ty.IsMapType(): - outputType = "map" - } - - mod.Outputs[name] = &terraform.OutputState{ - Type: outputType, - Value: val, - 
Sensitive: out.Sensitive, - } - } - - for _, res := range newMod.Resources { - resType := res.Addr.Resource.Type - providerType := res.ProviderConfig.Provider.Type - - resource := getResource(providers, providerType, res.Addr.Resource) - - for key, i := range res.Instances { - resState := &terraform.ResourceState{ - Type: resType, - Provider: legacyProviderConfigString(res.ProviderConfig), - } - - // We should always have a Current instance here, but be safe about checking. - if i.Current != nil { - flatmap, err := shimmedAttributes(i.Current, resource) - if err != nil { - return nil, fmt.Errorf("error decoding state for %q: %s", resType, err) - } - - var meta map[string]interface{} - if i.Current.Private != nil { - err := json.Unmarshal(i.Current.Private, &meta) - if err != nil { - return nil, err - } - } - - resState.Primary = &terraform.InstanceState{ - ID: flatmap["id"], - Attributes: flatmap, - Tainted: i.Current.Status == states.ObjectTainted, - Meta: meta, - } - - if i.Current.SchemaVersion != 0 { - if resState.Primary.Meta == nil { - resState.Primary.Meta = map[string]interface{}{} - } - resState.Primary.Meta["schema_version"] = i.Current.SchemaVersion - } - - // convert the indexes to the old style flapmap indexes - idx := "" - switch key.(type) { - case addrs.IntKey: - // don't add numeric index values to resources with a count of 0 - if len(res.Instances) > 1 { - idx = fmt.Sprintf(".%d", key) - } - case addrs.StringKey: - idx = "." 
+ key.String() - } - - mod.Resources[res.Addr.Resource.String()+idx] = resState - } - - // add any deposed instances - for _, dep := range i.Deposed { - flatmap, err := shimmedAttributes(dep, resource) - if err != nil { - return nil, fmt.Errorf("error decoding deposed state for %q: %s", resType, err) - } - - var meta map[string]interface{} - if dep.Private != nil { - err := json.Unmarshal(dep.Private, &meta) - if err != nil { - return nil, err - } - } - - deposed := &terraform.InstanceState{ - ID: flatmap["id"], - Attributes: flatmap, - Tainted: dep.Status == states.ObjectTainted, - Meta: meta, - } - if dep.SchemaVersion != 0 { - deposed.Meta = map[string]interface{}{ - "schema_version": dep.SchemaVersion, - } - } - - resState.Deposed = append(resState.Deposed, deposed) - } - } - } - } - - return state, nil -} - -func getResource(providers map[string]terraform.ResourceProvider, providerName string, addr addrs.Resource) *schema.Resource { - p := providers[providerName] - if p == nil { - panic(fmt.Sprintf("provider %q not found in test step", providerName)) - } - - // this is only for tests, so should only see schema.Providers - provider := p.(*schema.Provider) - - switch addr.Mode { - case addrs.ManagedResourceMode: - resource := provider.ResourcesMap[addr.Type] - if resource != nil { - return resource - } - case addrs.DataResourceMode: - resource := provider.DataSourcesMap[addr.Type] - if resource != nil { - return resource - } - } - - panic(fmt.Sprintf("resource %s not found in test step", addr.Type)) -} - -func shimmedAttributes(instance *states.ResourceInstanceObjectSrc, res *schema.Resource) (map[string]string, error) { - flatmap := instance.AttrsFlat - if flatmap != nil { - return flatmap, nil - } - - // if we have json attrs, they need to be decoded - rio, err := instance.Decode(res.CoreConfigSchema().ImpliedType()) - if err != nil { - return nil, err - } - - instanceState, err := res.ShimInstanceStateFromValue(rio.Value) - if err != nil { - return nil, err - 
} - - return instanceState.Attributes, nil -} - -func shimLegacyState(legacy *terraform.State) (*states.State, error) { - state, err := terraform.ShimLegacyState(legacy) - if err != nil { - return nil, err - } - - if state.HasResources() { - for _, module := range state.Modules { - for name, resource := range module.Resources { - module.Resources[name].ProviderConfig.Provider = addrs.ImpliedProviderForUnqualifiedType(resource.Addr.Resource.ImpliedProvider()) - } - } - } - return state, err -} - -// legacyProviderConfigString was copied from addrs.Provider.LegacyString() to -// create a legacy-style string from a non-legacy provider. This is only -// necessary as this package shims back and forth between legacy and modern -// state, neither of which encode the addrs.Provider for a resource. -func legacyProviderConfigString(pc addrs.AbsProviderConfig) string { - if pc.Alias != "" { - if len(pc.Module) == 0 { - return fmt.Sprintf("%s.%s.%s", "provider", pc.Provider.Type, pc.Alias) - } else { - return fmt.Sprintf("%s.%s.%s.%s", pc.Module.String(), "provider", pc.Provider.LegacyString(), pc.Alias) - } - } - if len(pc.Module) == 0 { - return fmt.Sprintf("%s.%s", "provider", pc.Provider.Type) - } - return fmt.Sprintf("%s.%s.%s", pc.Module.String(), "provider", pc.Provider.Type) -} diff --git a/helper/resource/state_shim_test.go b/helper/resource/state_shim_test.go deleted file mode 100644 index 789e1295f..000000000 --- a/helper/resource/state_shim_test.go +++ /dev/null @@ -1,387 +0,0 @@ -package resource - -import ( - "testing" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/terraform" - "github.com/zclconf/go-cty/cty" -) - -// TestStateShim is meant to be a fairly comprehensive test, checking for dependencies, root outputs, -func TestStateShim(t *testing.T) { - state := states.NewState() - - rootModule := state.RootModule() - if rootModule == nil { - 
t.Errorf("root module is nil; want valid object") - } - - rootModule.SetOutputValue("bar", cty.ListVal([]cty.Value{cty.StringVal("bar"), cty.StringVal("value")}), false) - rootModule.SetOutputValue("secret", cty.StringVal("secret value"), true) - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "foo", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsFlat: map[string]string{"id": "foo", "bazzle": "dazzle"}, - SchemaVersion: 7, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - rootModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "baz", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsFlat: map[string]string{"id": "baz", "bazzle": "dazzle"}, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - - childInstance := addrs.RootModuleInstance.Child("child", addrs.NoKey) - childModule := state.EnsureModule(childInstance) - childModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "test_data_thing", - Name: "foo", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id": "bar", "fuzzle":"wuzzle"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: childInstance.Module(), - }, - ) - childModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "baz", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id": "bar", "fizzle":"wizzle"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: 
childInstance.Module(), - }, - ) - - childModule.SetResourceInstanceDeposed( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "baz", - }.Instance(addrs.NoKey), - "00000001", - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsFlat: map[string]string{"id": "old", "fizzle": "wizzle"}, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: childInstance.Module(), - }, - ) - - childModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "lots", - }.Instance(addrs.IntKey(0)), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsFlat: map[string]string{"id": "0", "bazzle": "dazzle"}, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: childInstance.Module(), - }, - ) - childModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "lots", - }.Instance(addrs.IntKey(1)), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsFlat: map[string]string{"id": "1", "bazzle": "dazzle"}, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: childInstance.Module(), - }, - ) - - childModule.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "single_count", - }.Instance(addrs.IntKey(0)), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsJSON: []byte(`{"id": "single", "bazzle":"dazzle"}`), - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: childInstance.Module(), - }, - ) - - expected := &terraform.State{ - Version: 3, - Modules: []*terraform.ModuleState{ - &terraform.ModuleState{ - Path: []string{"root"}, - Outputs: map[string]*terraform.OutputState{ - "bar": { - Type: "list", - Value: []interface{}{"bar", "value"}, - }, - "secret": { - Sensitive: 
true, - Type: "string", - Value: "secret value", - }, - }, - Resources: map[string]*terraform.ResourceState{ - "test_thing.baz": &terraform.ResourceState{ - Type: "test_thing", - Provider: "provider.test", - Primary: &terraform.InstanceState{ - ID: "baz", - Attributes: map[string]string{ - "id": "baz", - "bazzle": "dazzle", - }, - }, - }, - "test_thing.foo": &terraform.ResourceState{ - Type: "test_thing", - Provider: "provider.test", - Primary: &terraform.InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "id": "foo", - "bazzle": "dazzle", - }, - Meta: map[string]interface{}{ - "schema_version": 7, - }, - }, - }, - }, - }, - &terraform.ModuleState{ - Path: []string{"root", "child"}, - Resources: map[string]*terraform.ResourceState{ - "test_thing.baz": &terraform.ResourceState{ - Type: "test_thing", - Provider: "module.child.provider.test", - Primary: &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "id": "bar", - "fizzle": "wizzle", - }, - }, - Deposed: []*terraform.InstanceState{ - { - ID: "old", - Attributes: map[string]string{ - "id": "old", - "fizzle": "wizzle", - }, - }, - }, - }, - "data.test_data_thing.foo": &terraform.ResourceState{ - Type: "test_data_thing", - Provider: "module.child.provider.test", - Primary: &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "id": "bar", - "fuzzle": "wuzzle", - }, - }, - }, - "test_thing.lots.0": &terraform.ResourceState{ - Type: "test_thing", - Provider: "module.child.provider.test", - Primary: &terraform.InstanceState{ - ID: "0", - Attributes: map[string]string{ - "id": "0", - "bazzle": "dazzle", - }, - }, - }, - "test_thing.lots.1": &terraform.ResourceState{ - Type: "test_thing", - Provider: "module.child.provider.test", - Primary: &terraform.InstanceState{ - ID: "1", - Attributes: map[string]string{ - "id": "1", - "bazzle": "dazzle", - }, - Tainted: true, - }, - }, - "test_thing.single_count": &terraform.ResourceState{ - Type: "test_thing", - Provider: 
"module.child.provider.test", - Primary: &terraform.InstanceState{ - ID: "single", - Attributes: map[string]string{ - "id": "single", - "bazzle": "dazzle", - }, - }, - }, - }, - }, - }, - } - - providers := map[string]terraform.ResourceProvider{ - "test": &schema.Provider{ - ResourcesMap: map[string]*schema.Resource{ - "test_thing": &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": {Type: schema.TypeString, Computed: true}, - "fizzle": {Type: schema.TypeString, Optional: true}, - "bazzle": {Type: schema.TypeString, Optional: true}, - }, - }, - }, - DataSourcesMap: map[string]*schema.Resource{ - "test_data_thing": &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": {Type: schema.TypeString, Computed: true}, - "fuzzle": {Type: schema.TypeString, Optional: true}, - }, - }, - }, - }, - } - - shimmed, err := shimNewState(state, providers) - if err != nil { - t.Fatal(err) - } - - if !expected.Equal(shimmed) { - t.Fatalf("wrong result state\ngot:\n%s\n\nwant:\n%s", shimmed, expected) - } -} - -// TestShimLegacyState only checks the functionality unique to this func: adding -// the implied provider FQN -func TestShimLegacyState(t *testing.T) { - - input := &terraform.State{ - Version: 3, - Modules: []*terraform.ModuleState{ - &terraform.ModuleState{ - Path: []string{"root"}, - Resources: map[string]*terraform.ResourceState{ - "test_thing.baz": &terraform.ResourceState{ - Type: "test_thing", - Provider: "provider.test", - Primary: &terraform.InstanceState{ - ID: "baz", - Attributes: map[string]string{ - "id": "baz", - "bazzle": "dazzle", - }, - }, - }, - }, - }, - &terraform.ModuleState{ - Path: []string{"root", "child"}, - Resources: map[string]*terraform.ResourceState{ - "test_thing.bar": &terraform.ResourceState{ - Type: "test_thing", - Provider: "module.child.provider.test", - Primary: &terraform.InstanceState{ - ID: "bar", - Attributes: map[string]string{ - "id": "bar", - "fizzle": "wizzle", - }, - }, - }, - }, - }, - }, - } - - expected := 
states.NewState() - root := expected.EnsureModule(addrs.RootModuleInstance) - root.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "baz", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsFlat: map[string]string{"id": "baz", "bazzle": "dazzle"}, - Dependencies: []addrs.ConfigResource{}, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: addrs.RootModule, - }, - ) - child := expected.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) - child.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_thing", - Name: "bar", - }.Instance(addrs.NoKey), - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectReady, - AttrsFlat: map[string]string{"id": "bar", "fizzle": "wizzle"}, - Dependencies: []addrs.ConfigResource{}, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("test"), - Module: child.Addr.Module(), - }, - ) - - got, err := shimLegacyState(input) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - if !got.Equal(expected) { - t.Fatal("wrong result") - } -} diff --git a/helper/resource/state_test.go b/helper/resource/state_test.go deleted file mode 100644 index 6d6b329a1..000000000 --- a/helper/resource/state_test.go +++ /dev/null @@ -1,329 +0,0 @@ -package resource - -import ( - "errors" - "strings" - "sync/atomic" - "testing" - "time" -) - -func FailedStateRefreshFunc() StateRefreshFunc { - return func() (interface{}, string, error) { - return nil, "", errors.New("failed") - } -} - -func TimeoutStateRefreshFunc() StateRefreshFunc { - return func() (interface{}, string, error) { - time.Sleep(100 * time.Second) - return nil, "", errors.New("failed") - } -} - -func SuccessfulStateRefreshFunc() StateRefreshFunc { - return func() (interface{}, string, error) { - return struct{}{}, "running", nil - } -} - -type StateGenerator struct { 
- position int - stateSequence []string -} - -func (r *StateGenerator) NextState() (int, string, error) { - p, v := r.position, "" - if len(r.stateSequence)-1 >= p { - v = r.stateSequence[p] - } else { - return -1, "", errors.New("No more states available") - } - - r.position += 1 - - return p, v, nil -} - -func NewStateGenerator(sequence []string) *StateGenerator { - r := &StateGenerator{} - r.stateSequence = sequence - - return r -} - -func InconsistentStateRefreshFunc() StateRefreshFunc { - sequence := []string{ - "done", "replicating", - "done", "done", "done", - "replicating", - "done", "done", "done", - } - - r := NewStateGenerator(sequence) - - return func() (interface{}, string, error) { - idx, s, err := r.NextState() - if err != nil { - return nil, "", err - } - - return idx, s, nil - } -} - -func UnknownPendingStateRefreshFunc() StateRefreshFunc { - sequence := []string{ - "unknown1", "unknown2", "done", - } - - r := NewStateGenerator(sequence) - - return func() (interface{}, string, error) { - idx, s, err := r.NextState() - if err != nil { - return nil, "", err - } - - return idx, s, nil - } -} - -func TestWaitForState_inconsistent_positive(t *testing.T) { - conf := &StateChangeConf{ - Pending: []string{"replicating"}, - Target: []string{"done"}, - Refresh: InconsistentStateRefreshFunc(), - Timeout: 90 * time.Millisecond, - PollInterval: 10 * time.Millisecond, - ContinuousTargetOccurence: 3, - } - - idx, err := conf.WaitForState() - - if err != nil { - t.Fatalf("err: %s", err) - } - - if idx != 4 { - t.Fatalf("Expected index 4, given %d", idx.(int)) - } -} - -func TestWaitForState_inconsistent_negative(t *testing.T) { - refreshCount := int64(0) - f := InconsistentStateRefreshFunc() - refresh := func() (interface{}, string, error) { - atomic.AddInt64(&refreshCount, 1) - return f() - } - - conf := &StateChangeConf{ - Pending: []string{"replicating"}, - Target: []string{"done"}, - Refresh: refresh, - Timeout: 85 * time.Millisecond, - PollInterval: 10 * 
time.Millisecond, - ContinuousTargetOccurence: 4, - } - - _, err := conf.WaitForState() - - if err == nil { - t.Fatal("Expected timeout error. No error returned.") - } - - // we can't guarantee the exact number of refresh calls in the tests by - // timing them, but we want to make sure the test at least went through th - // required states. - if atomic.LoadInt64(&refreshCount) < 6 { - t.Fatal("refreshed called too few times") - } - - expectedErr := "timeout while waiting for state to become 'done'" - if !strings.HasPrefix(err.Error(), expectedErr) { - t.Fatalf("error prefix doesn't match.\nExpected: %q\nGiven: %q\n", expectedErr, err.Error()) - } -} - -func TestWaitForState_timeout(t *testing.T) { - old := refreshGracePeriod - refreshGracePeriod = 5 * time.Millisecond - defer func() { - refreshGracePeriod = old - }() - - conf := &StateChangeConf{ - Pending: []string{"pending", "incomplete"}, - Target: []string{"running"}, - Refresh: TimeoutStateRefreshFunc(), - Timeout: 1 * time.Millisecond, - } - - obj, err := conf.WaitForState() - - if err == nil { - t.Fatal("Expected timeout error. No error returned.") - } - - expectedErr := "timeout while waiting for state to become 'running' (timeout: 1ms)" - if err.Error() != expectedErr { - t.Fatalf("Errors don't match.\nExpected: %q\nGiven: %q\n", expectedErr, err.Error()) - } - - if obj != nil { - t.Fatalf("should not return obj") - } -} - -// Make sure a timeout actually cancels the refresh goroutine and waits for its -// return. 
-func TestWaitForState_cancel(t *testing.T) { - // make this refresh func block until we cancel it - cancel := make(chan struct{}) - refresh := func() (interface{}, string, error) { - <-cancel - return nil, "pending", nil - } - conf := &StateChangeConf{ - Pending: []string{"pending", "incomplete"}, - Target: []string{"running"}, - Refresh: refresh, - Timeout: 10 * time.Millisecond, - PollInterval: 10 * time.Second, - } - - var obj interface{} - var err error - - waitDone := make(chan struct{}) - go func() { - defer close(waitDone) - obj, err = conf.WaitForState() - }() - - // make sure WaitForState is blocked - select { - case <-waitDone: - t.Fatal("WaitForState returned too early") - case <-time.After(10 * time.Millisecond): - } - - // unlock the refresh function - close(cancel) - // make sure WaitForState returns - select { - case <-waitDone: - case <-time.After(time.Second): - t.Fatal("WaitForState didn't return after refresh finished") - } - - if err == nil { - t.Fatal("Expected timeout error. 
No error returned.") - } - - expectedErr := "timeout while waiting for state to become 'running'" - if !strings.HasPrefix(err.Error(), expectedErr) { - t.Fatalf("Errors don't match.\nExpected: %q\nGiven: %q\n", expectedErr, err.Error()) - } - - if obj != nil { - t.Fatalf("should not return obj") - } - -} - -func TestWaitForState_success(t *testing.T) { - conf := &StateChangeConf{ - Pending: []string{"pending", "incomplete"}, - Target: []string{"running"}, - Refresh: SuccessfulStateRefreshFunc(), - Timeout: 200 * time.Second, - } - - obj, err := conf.WaitForState() - if err != nil { - t.Fatalf("err: %s", err) - } - if obj == nil { - t.Fatalf("should return obj") - } -} - -func TestWaitForState_successUnknownPending(t *testing.T) { - conf := &StateChangeConf{ - Target: []string{"done"}, - Refresh: UnknownPendingStateRefreshFunc(), - Timeout: 200 * time.Second, - } - - obj, err := conf.WaitForState() - if err != nil { - t.Fatalf("err: %s", err) - } - if obj == nil { - t.Fatalf("should return obj") - } -} - -func TestWaitForState_successEmpty(t *testing.T) { - conf := &StateChangeConf{ - Pending: []string{"pending", "incomplete"}, - Target: []string{}, - Refresh: func() (interface{}, string, error) { - return nil, "", nil - }, - Timeout: 200 * time.Second, - } - - obj, err := conf.WaitForState() - if err != nil { - t.Fatalf("err: %s", err) - } - if obj != nil { - t.Fatalf("obj should be nil") - } -} - -func TestWaitForState_failureEmpty(t *testing.T) { - conf := &StateChangeConf{ - Pending: []string{"pending", "incomplete"}, - Target: []string{}, - NotFoundChecks: 1, - Refresh: func() (interface{}, string, error) { - return 42, "pending", nil - }, - PollInterval: 10 * time.Millisecond, - Timeout: 100 * time.Millisecond, - } - - _, err := conf.WaitForState() - if err == nil { - t.Fatal("Expected timeout error. 
Got none.") - } - expectedErr := "timeout while waiting for resource to be gone (last state: 'pending', timeout: 100ms)" - if err.Error() != expectedErr { - t.Fatalf("Errors don't match.\nExpected: %q\nGiven: %q\n", expectedErr, err.Error()) - } -} - -func TestWaitForState_failure(t *testing.T) { - conf := &StateChangeConf{ - Pending: []string{"pending", "incomplete"}, - Target: []string{"running"}, - Refresh: FailedStateRefreshFunc(), - Timeout: 200 * time.Second, - } - - obj, err := conf.WaitForState() - if err == nil { - t.Fatal("Expected error. No error returned.") - } - expectedErr := "failed" - if err.Error() != expectedErr { - t.Fatalf("Errors don't match.\nExpected: %q\nGiven: %q\n", expectedErr, err.Error()) - } - if obj != nil { - t.Fatalf("should not return obj") - } -} diff --git a/helper/resource/testing.go b/helper/resource/testing.go deleted file mode 100644 index c36ff4b2e..000000000 --- a/helper/resource/testing.go +++ /dev/null @@ -1,1285 +0,0 @@ -package resource - -import ( - "bytes" - "flag" - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - "reflect" - "regexp" - "strings" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-multierror" - "github.com/mitchellh/colorstring" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/command/format" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configload" - "github.com/hashicorp/terraform/internal/initwd" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/terraform" - "github.com/hashicorp/terraform/tfdiags" - - _ "github.com/hashicorp/terraform/internal/logging" -) - -// flagSweep is a flag available when running tests on the command line. It -// contains a comma seperated list of regions to for the sweeper functions to -// run in. 
This flag bypasses the normal Test path and instead runs functions designed to -// clean up any leaked resources a testing environment could have created. It is -// a best effort attempt, and relies on Provider authors to implement "Sweeper" -// methods for resources. - -// Adding Sweeper methods with AddTestSweepers will -// construct a list of sweeper funcs to be called here. We iterate through -// regions provided by the sweep flag, and for each region we iterate through the -// tests, and exit on any errors. At time of writing, sweepers are ran -// sequentially, however they can list dependencies to be ran first. We track -// the sweepers that have been ran, so as to not run a sweeper twice for a given -// region. -// -// WARNING: -// Sweepers are designed to be destructive. You should not use the -sweep flag -// in any environment that is not strictly a test environment. Resources will be -// destroyed. - -var flagSweep = flag.String("sweep", "", "List of Regions to run available Sweepers") -var flagSweepRun = flag.String("sweep-run", "", "Comma seperated list of Sweeper Tests to run") -var sweeperFuncs map[string]*Sweeper - -// map of sweepers that have ran, and the success/fail status based on any error -// raised -var sweeperRunList map[string]bool - -// type SweeperFunc is a signature for a function that acts as a sweeper. It -// accepts a string for the region that the sweeper is to be ran in. This -// function must be able to construct a valid client for that region. -type SweeperFunc func(r string) error - -type Sweeper struct { - // Name for sweeper. Must be unique to be ran by the Sweeper Runner - Name string - - // Dependencies list the const names of other Sweeper functions that must be ran - // prior to running this Sweeper. 
This is an ordered list that will be invoked - // recursively at the helper/resource level - Dependencies []string - - // Sweeper function that when invoked sweeps the Provider of specific - // resources - F SweeperFunc -} - -func init() { - sweeperFuncs = make(map[string]*Sweeper) -} - -// AddTestSweepers function adds a given name and Sweeper configuration -// pair to the internal sweeperFuncs map. Invoke this function to register a -// resource sweeper to be available for running when the -sweep flag is used -// with `go test`. Sweeper names must be unique to help ensure a given sweeper -// is only ran once per run. -func AddTestSweepers(name string, s *Sweeper) { - if _, ok := sweeperFuncs[name]; ok { - log.Fatalf("[ERR] Error adding (%s) to sweeperFuncs: function already exists in map", name) - } - - sweeperFuncs[name] = s -} - -func TestMain(m *testing.M) { - flag.Parse() - if *flagSweep != "" { - // parse flagSweep contents for regions to run - regions := strings.Split(*flagSweep, ",") - - // get filtered list of sweepers to run based on sweep-run flag - sweepers := filterSweepers(*flagSweepRun, sweeperFuncs) - for _, region := range regions { - region = strings.TrimSpace(region) - // reset sweeperRunList for each region - sweeperRunList = map[string]bool{} - - log.Printf("[DEBUG] Running Sweepers for region (%s):\n", region) - for _, sweeper := range sweepers { - if err := runSweeperWithRegion(region, sweeper); err != nil { - log.Fatalf("[ERR] error running (%s): %s", sweeper.Name, err) - } - } - - log.Printf("Sweeper Tests ran:\n") - for s, _ := range sweeperRunList { - fmt.Printf("\t- %s\n", s) - } - } - } else { - os.Exit(m.Run()) - } -} - -// filterSweepers takes a comma seperated string listing the names of sweepers -// to be ran, and returns a filtered set from the list of all of sweepers to -// run based on the names given. 
-func filterSweepers(f string, source map[string]*Sweeper) map[string]*Sweeper { - filterSlice := strings.Split(strings.ToLower(f), ",") - if len(filterSlice) == 1 && filterSlice[0] == "" { - // if the filter slice is a single element of "" then no sweeper list was - // given, so just return the full list - return source - } - - sweepers := make(map[string]*Sweeper) - for name, sweeper := range source { - for _, s := range filterSlice { - if strings.Contains(strings.ToLower(name), s) { - sweepers[name] = sweeper - } - } - } - return sweepers -} - -// runSweeperWithRegion recieves a sweeper and a region, and recursively calls -// itself with that region for every dependency found for that sweeper. If there -// are no dependencies, invoke the contained sweeper fun with the region, and -// add the success/fail status to the sweeperRunList. -func runSweeperWithRegion(region string, s *Sweeper) error { - for _, dep := range s.Dependencies { - if depSweeper, ok := sweeperFuncs[dep]; ok { - log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), running..", s.Name, dep) - if err := runSweeperWithRegion(region, depSweeper); err != nil { - return err - } - } else { - log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) - } - } - - if _, ok := sweeperRunList[s.Name]; ok { - log.Printf("[DEBUG] Sweeper (%s) already ran in region (%s)", s.Name, region) - return nil - } - - runE := s.F(region) - if runE == nil { - sweeperRunList[s.Name] = true - } else { - sweeperRunList[s.Name] = false - } - - return runE -} - -const TestEnvVar = "TF_ACC" - -// TestProvider can be implemented by any ResourceProvider to provide custom -// reset functionality at the start of an acceptance test. -// The helper/schema Provider implements this interface. -type TestProvider interface { - TestReset() error -} - -// TestCheckFunc is the callback type used with acceptance tests to check -// the state of a resource. 
The state passed in is the latest state known, -// or in the case of being after a destroy, it is the last known state when -// it was created. -type TestCheckFunc func(*terraform.State) error - -// ImportStateCheckFunc is the check function for ImportState tests -type ImportStateCheckFunc func([]*terraform.InstanceState) error - -// ImportStateIdFunc is an ID generation function to help with complex ID -// generation for ImportState tests. -type ImportStateIdFunc func(*terraform.State) (string, error) - -// TestCase is a single acceptance test case used to test the apply/destroy -// lifecycle of a resource in a specific configuration. -// -// When the destroy plan is executed, the config from the last TestStep -// is used to plan it. -type TestCase struct { - // IsUnitTest allows a test to run regardless of the TF_ACC - // environment variable. This should be used with care - only for - // fast tests on local resources (e.g. remote state with a local - // backend) but can be used to increase confidence in correct - // operation of Terraform without waiting for a full acctest run. - IsUnitTest bool - - // PreCheck, if non-nil, will be called before any test steps are - // executed. It will only be executed in the case that the steps - // would run, so it can be used for some validation before running - // acceptance tests, such as verifying that keys are setup. - PreCheck func() - - // Providers is the ResourceProvider that will be under test. - // - // Alternately, ProviderFactories can be specified for the providers - // that are valid. This takes priority over Providers. - // - // The end effect of each is the same: specifying the providers that - // are used within the tests. 
- Providers map[string]terraform.ResourceProvider - ProviderFactories map[string]terraform.ResourceProviderFactory - - // PreventPostDestroyRefresh can be set to true for cases where data sources - // are tested alongside real resources - PreventPostDestroyRefresh bool - - // CheckDestroy is called after the resource is finally destroyed - // to allow the tester to test that the resource is truly gone. - CheckDestroy TestCheckFunc - - // Steps are the apply sequences done within the context of the - // same state. Each step can have its own check to verify correctness. - Steps []TestStep - - // The settings below control the "ID-only refresh test." This is - // an enabled-by-default test that tests that a refresh can be - // refreshed with only an ID to result in the same attributes. - // This validates completeness of Refresh. - // - // IDRefreshName is the name of the resource to check. This will - // default to the first non-nil primary resource in the state. - // - // IDRefreshIgnore is a list of configuration keys that will be ignored. - IDRefreshName string - IDRefreshIgnore []string -} - -// TestStep is a single apply sequence of a test, done within the -// context of a state. -// -// Multiple TestSteps can be sequenced in a Test to allow testing -// potentially complex update logic. In general, simply create/destroy -// tests will only need one step. -type TestStep struct { - // ResourceName should be set to the name of the resource - // that is being tested. Example: "aws_instance.foo". Various test - // modes use this to auto-detect state information. - // - // This is only required if the test mode settings below say it is - // for the mode you're using. - ResourceName string - - // PreConfig is called before the Config is applied to perform any per-step - // setup that needs to happen. This is called regardless of "test mode" - // below. - PreConfig func() - - // Taint is a list of resource addresses to taint prior to the execution of - // the step. 
Be sure to only include this at a step where the referenced - // address will be present in state, as it will fail the test if the resource - // is missing. - // - // This option is ignored on ImportState tests, and currently only works for - // resources in the root module path. - Taint []string - - //--------------------------------------------------------------- - // Test modes. One of the following groups of settings must be - // set to determine what the test step will do. Ideally we would've - // used Go interfaces here but there are now hundreds of tests we don't - // want to re-type so instead we just determine which step logic - // to run based on what settings below are set. - //--------------------------------------------------------------- - - //--------------------------------------------------------------- - // Plan, Apply testing - //--------------------------------------------------------------- - - // Config a string of the configuration to give to Terraform. If this - // is set, then the TestCase will execute this step with the same logic - // as a `terraform apply`. - Config string - - // Check is called after the Config is applied. Use this step to - // make your own API calls to check the status of things, and to - // inspect the format of the ResourceState itself. - // - // If an error is returned, the test will fail. In this case, a - // destroy plan will still be attempted. - // - // If this is nil, no check is done on this step. - Check TestCheckFunc - - // Destroy will create a destroy plan if set to true. - Destroy bool - - // ExpectNonEmptyPlan can be set to true for specific types of tests that are - // looking to verify that a diff occurs - ExpectNonEmptyPlan bool - - // ExpectError allows the construction of test cases that we expect to fail - // with an error. The specified regexp must match against the error for the - // test to pass. 
- ExpectError *regexp.Regexp - - // PlanOnly can be set to only run `plan` with this configuration, and not - // actually apply it. This is useful for ensuring config changes result in - // no-op plans - PlanOnly bool - - // PreventDiskCleanup can be set to true for testing terraform modules which - // require access to disk at runtime. Note that this will leave files in the - // temp folder - PreventDiskCleanup bool - - // PreventPostDestroyRefresh can be set to true for cases where data sources - // are tested alongside real resources - PreventPostDestroyRefresh bool - - // SkipFunc is called before applying config, but after PreConfig - // This is useful for defining test steps with platform-dependent checks - SkipFunc func() (bool, error) - - //--------------------------------------------------------------- - // ImportState testing - //--------------------------------------------------------------- - - // ImportState, if true, will test the functionality of ImportState - // by importing the resource with ResourceName (must be set) and the - // ID of that resource. - ImportState bool - - // ImportStateId is the ID to perform an ImportState operation with. - // This is optional. If it isn't set, then the resource ID is automatically - // determined by inspecting the state for ResourceName's ID. - ImportStateId string - - // ImportStateIdPrefix is the prefix added in front of ImportStateId. - // This can be useful in complex import cases, where more than one - // attribute needs to be passed on as the Import ID. Mainly in cases - // where the ID is not known, and a known prefix needs to be added to - // the unset ImportStateId field. - ImportStateIdPrefix string - - // ImportStateIdFunc is a function that can be used to dynamically generate - // the ID for the ImportState tests. It is sent the state, which can be - // checked to derive the attributes necessary and generate the string in the - // desired format. 
- ImportStateIdFunc ImportStateIdFunc - - // ImportStateCheck checks the results of ImportState. It should be - // used to verify that the resulting value of ImportState has the - // proper resources, IDs, and attributes. - ImportStateCheck ImportStateCheckFunc - - // ImportStateVerify, if true, will also check that the state values - // that are finally put into the state after import match for all the - // IDs returned by the Import. Note that this checks for strict equality - // and does not respect DiffSuppressFunc or CustomizeDiff. - // - // ImportStateVerifyIgnore is a list of prefixes of fields that should - // not be verified to be equal. These can be set to ephemeral fields or - // fields that can't be refreshed and don't matter. - ImportStateVerify bool - ImportStateVerifyIgnore []string - - // provider s is used internally to maintain a reference to the - // underlying providers during the tests - providers map[string]terraform.ResourceProvider -} - -// Set to a file mask in sprintf format where %s is test name -const EnvLogPathMask = "TF_LOG_PATH_MASK" - -// ParallelTest performs an acceptance test on a resource, allowing concurrency -// with other ParallelTest. -// -// Tests will fail if they do not properly handle conditions to allow multiple -// tests to occur against the same resource or service (e.g. random naming). -// All other requirements of the Test function also apply to this function. -func ParallelTest(t TestT, c TestCase) { - t.Parallel() - Test(t, c) -} - -// Test performs an acceptance test on a resource. -// -// Tests are not run unless an environmental variable "TF_ACC" is -// set to some non-empty value. This is to avoid test cases surprising -// a user by creating real resources. -// -// Tests will fail unless the verbose flag (`go test -v`, or explicitly -// the "-test.v" flag) is set. Because some acceptance tests take quite -// long, we require the verbose flag so users are able to see progress -// output. 
-func Test(t TestT, c TestCase) { - // We only run acceptance tests if an env var is set because they're - // slow and generally require some outside configuration. You can opt out - // of this with OverrideEnvVar on individual TestCases. - if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest { - t.Skip(fmt.Sprintf( - "Acceptance tests skipped unless env '%s' set", - TestEnvVar)) - return - } - - // We require verbose mode so that the user knows what is going on. - if !testTesting && !testing.Verbose() && !c.IsUnitTest { - t.Fatal("Acceptance tests must be run with the -v flag on tests") - return - } - - // Run the PreCheck if we have it - if c.PreCheck != nil { - c.PreCheck() - } - - providerFactories, err := testProviderFactories(c) - if err != nil { - t.Fatal(err) - } - - // get instances of all providers, so we can use the individual - // resources to shim the state during the tests. - providers := make(map[string]terraform.ResourceProvider) - legacyProviderFactories, err := testProviderFactoriesLegacy(c) - if err != nil { - t.Fatal(err) - } - for name, pf := range legacyProviderFactories { - p, err := pf() - if err != nil { - t.Fatal(err) - } - providers[name] = p - } - - opts := terraform.ContextOpts{Providers: providerFactories} - - // A single state variable to track the lifecycle, starting with no state - var state *terraform.State - - // Go through each step and run it - var idRefreshCheck *terraform.ResourceState - idRefresh := c.IDRefreshName != "" - errored := false - for i, step := range c.Steps { - // insert the providers into the step so we can get the resources for - // shimming the state - step.providers = providers - - var err error - log.Printf("[DEBUG] Test: Executing step %d", i) - - if step.SkipFunc != nil { - skip, err := step.SkipFunc() - if err != nil { - t.Fatal(err) - } - if skip { - log.Printf("[WARN] Skipping step %d", i) - continue - } - } - - if step.Config == "" && !step.ImportState { - err = fmt.Errorf( - "unknown test mode for step. 
Please see TestStep docs\n\n%#v", - step) - } else { - if step.ImportState { - if step.Config == "" { - step.Config = testProviderConfig(c) - } - - // Can optionally set step.Config in addition to - // step.ImportState, to provide config for the import. - state, err = testStepImportState(opts, state, step) - } else { - state, err = testStepConfig(opts, state, step) - } - } - - // If we expected an error, but did not get one, fail - if err == nil && step.ExpectError != nil { - errored = true - t.Error(fmt.Sprintf( - "Step %d, no error received, but expected a match to:\n\n%s\n\n", - i, step.ExpectError)) - break - } - - // If there was an error, exit - if err != nil { - // Perhaps we expected an error? Check if it matches - if step.ExpectError != nil { - if !step.ExpectError.MatchString(err.Error()) { - errored = true - t.Error(fmt.Sprintf( - "Step %d, expected error:\n\n%s\n\nTo match:\n\n%s\n\n", - i, err, step.ExpectError)) - break - } - } else { - errored = true - t.Error(fmt.Sprintf("Step %d error: %s", i, detailedErrorMessage(err))) - break - } - } - - // If we've never checked an id-only refresh and our state isn't - // empty, find the first resource and test it. - if idRefresh && idRefreshCheck == nil && !state.Empty() { - // Find the first non-nil resource in the state - for _, m := range state.Modules { - if len(m.Resources) > 0 { - if v, ok := m.Resources[c.IDRefreshName]; ok { - idRefreshCheck = v - } - - break - } - } - - // If we have an instance to check for refreshes, do it - // immediately. We do it in the middle of another test - // because it shouldn't affect the overall state (refresh - // is read-only semantically) and we want to fail early if - // this fails. If refresh isn't read-only, then this will have - // caught a different bug. 
- if idRefreshCheck != nil { - log.Printf( - "[WARN] Test: Running ID-only refresh check on %s", - idRefreshCheck.Primary.ID) - if err := testIDOnlyRefresh(c, opts, step, idRefreshCheck); err != nil { - log.Printf("[ERROR] Test: ID-only test failed: %s", err) - t.Error(fmt.Sprintf( - "[ERROR] Test: ID-only test failed: %s", err)) - break - } - } - } - } - - // If we never checked an id-only refresh, it is a failure. - if idRefresh { - if !errored && len(c.Steps) > 0 && idRefreshCheck == nil { - t.Error("ID-only refresh check never ran.") - } - } - - // If we have a state, then run the destroy - if state != nil { - lastStep := c.Steps[len(c.Steps)-1] - destroyStep := TestStep{ - Config: lastStep.Config, - Check: c.CheckDestroy, - Destroy: true, - PreventDiskCleanup: lastStep.PreventDiskCleanup, - PreventPostDestroyRefresh: c.PreventPostDestroyRefresh, - providers: providers, - } - - log.Printf("[WARN] Test: Executing destroy step") - state, err := testStep(opts, state, destroyStep) - if err != nil { - t.Error(fmt.Sprintf( - "Error destroying resource! WARNING: Dangling resources\n"+ - "may exist. The full state and error is shown below.\n\n"+ - "Error: %s\n\nState: %s", - err, - state)) - } - } else { - log.Printf("[WARN] Skipping destroy test since there is no state.") - } -} - -// testProviderConfig takes the list of Providers in a TestCase and returns a -// config with only empty provider blocks. This is useful for Import, where no -// config is provided, but the providers must be defined. -func testProviderConfig(c TestCase) string { - var lines []string - for p := range c.Providers { - lines = append(lines, fmt.Sprintf("provider %q {}\n", p)) - } - - return strings.Join(lines, "") -} - -// testProviderFactoriesLegacy is like testProviderFactories but it returns -// providers implementing the legacy interface terraform.ResourceProvider, -// rather than the current providers.Interface. 
-// -// It also identifies all providers as legacy-style single names rather than -// full addresses, for compatibility with legacy code that doesn't understand -// FQNs. -func testProviderFactoriesLegacy(c TestCase) (map[string]terraform.ResourceProviderFactory, error) { - ctxProviders := make(map[string]terraform.ResourceProviderFactory) - for k, pf := range c.ProviderFactories { - ctxProviders[k] = pf - } - - // add any fixed providers - for k, p := range c.Providers { - ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p) - } - return ctxProviders, nil -} - -// testProviderFactories combines the fixed Providers and -// ResourceProviderFactory functions into a single map of -// ResourceProviderFactory functions. -func testProviderFactories(c TestCase) (map[addrs.Provider]providers.Factory, error) { - ctxProviders, err := testProviderFactoriesLegacy(c) - if err != nil { - return nil, err - } - - // We additionally wrap all of the factories as a GRPCTestProvider, which - // allows them to appear as a new-style providers.Interface, rather than - // the legacy terraform.ResourceProvider. - newProviders := make(map[addrs.Provider]providers.Factory) - for legacyName, pf := range ctxProviders { - factory := pf // must copy to ensure each closure sees its own value - newProviders[addrs.NewDefaultProvider(legacyName)] = func() (providers.Interface, error) { - p, err := factory() - if err != nil { - return nil, err - } - - // The provider is wrapped in a GRPCTestProvider so that it can be - // passed back to terraform core as a providers.Interface, rather - // than the legacy ResourceProvider. - return GRPCTestProvider(p), nil - } - } - - return newProviders, nil -} - -// UnitTest is a helper to force the acceptance testing harness to run in the -// normal unit test suite. This should only be used for resource that don't -// have any external dependencies. 
-func UnitTest(t TestT, c TestCase) { - c.IsUnitTest = true - Test(t, c) -} - -func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r *terraform.ResourceState) error { - // TODO: We guard by this right now so master doesn't explode. We - // need to remove this eventually to make this part of the normal tests. - if os.Getenv("TF_ACC_IDONLY") == "" { - return nil - } - - addr := addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: r.Type, - Name: "foo", - }.Instance(addrs.NoKey) - absAddr := addr.Absolute(addrs.RootModuleInstance) - - // Build the state. The state is just the resource with an ID. There - // are no attributes. We only set what is needed to perform a refresh. - state := states.NewState() - state.RootModule().SetResourceInstanceCurrent( - addr, - &states.ResourceInstanceObjectSrc{ - AttrsFlat: r.Primary.Attributes, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("placeholder"), - Module: addrs.RootModule, - }, - ) - - // Create the config module. We use the full config because Refresh - // doesn't have access to it and we may need things like provider - // configurations. The initial implementation of id-only checks used - // an empty config module, but that caused the aforementioned problems. - cfg, err := testConfig(opts, step) - if err != nil { - return err - } - - // Initialize the context - opts.Config = cfg - opts.State = state - ctx, ctxDiags := terraform.NewContext(&opts) - if ctxDiags.HasErrors() { - return ctxDiags.Err() - } - if diags := ctx.Validate(); len(diags) > 0 { - if diags.HasErrors() { - return errwrap.Wrapf("config is invalid: {{err}}", diags.Err()) - } - - log.Printf("[WARN] Config warnings:\n%s", diags.Err().Error()) - } - - // Refresh! - state, refreshDiags := ctx.Refresh() - if refreshDiags.HasErrors() { - return refreshDiags.Err() - } - - // Verify attribute equivalence. 
- actualR := state.ResourceInstance(absAddr) - if actualR == nil { - return fmt.Errorf("Resource gone!") - } - if actualR.Current == nil { - return fmt.Errorf("Resource has no primary instance") - } - actual := actualR.Current.AttrsFlat - expected := r.Primary.Attributes - // Remove fields we're ignoring - for _, v := range c.IDRefreshIgnore { - for k, _ := range actual { - if strings.HasPrefix(k, v) { - delete(actual, k) - } - } - for k, _ := range expected { - if strings.HasPrefix(k, v) { - delete(expected, k) - } - } - } - - if !reflect.DeepEqual(actual, expected) { - // Determine only the different attributes - for k, v := range expected { - if av, ok := actual[k]; ok && v == av { - delete(expected, k) - delete(actual, k) - } - } - - spewConf := spew.NewDefaultConfig() - spewConf.SortKeys = true - return fmt.Errorf( - "Attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ - "\n\n%s\n\n%s", - spewConf.Sdump(actual), spewConf.Sdump(expected)) - } - - return nil -} - -func testConfig(opts terraform.ContextOpts, step TestStep) (*configs.Config, error) { - if step.PreConfig != nil { - step.PreConfig() - } - - cfgPath, err := ioutil.TempDir("", "tf-test") - if err != nil { - return nil, fmt.Errorf("Error creating temporary directory for config: %s", err) - } - - if step.PreventDiskCleanup { - log.Printf("[INFO] Skipping defer os.RemoveAll call") - } else { - defer os.RemoveAll(cfgPath) - } - - // Write the main configuration file - err = ioutil.WriteFile(filepath.Join(cfgPath, "main.tf"), []byte(step.Config), os.ModePerm) - if err != nil { - return nil, fmt.Errorf("Error creating temporary file for config: %s", err) - } - - // Create directory for our child modules, if any. 
- modulesDir := filepath.Join(cfgPath, ".modules") - err = os.Mkdir(modulesDir, os.ModePerm) - if err != nil { - return nil, fmt.Errorf("Error creating child modules directory: %s", err) - } - - inst := initwd.NewModuleInstaller(modulesDir, nil) - _, installDiags := inst.InstallModules(cfgPath, true, initwd.ModuleInstallHooksImpl{}) - if installDiags.HasErrors() { - return nil, installDiags.Err() - } - - loader, err := configload.NewLoader(&configload.Config{ - ModulesDir: modulesDir, - }) - if err != nil { - return nil, fmt.Errorf("failed to create config loader: %s", err) - } - - config, configDiags := loader.LoadConfig(cfgPath) - if configDiags.HasErrors() { - return nil, configDiags - } - - return config, nil -} - -func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { - if c.ResourceName == "" { - return nil, fmt.Errorf("ResourceName must be set in TestStep") - } - - for _, m := range state.Modules { - if len(m.Resources) > 0 { - if v, ok := m.Resources[c.ResourceName]; ok { - return v, nil - } - } - } - - return nil, fmt.Errorf( - "Resource specified by ResourceName couldn't be found: %s", c.ResourceName) -} - -// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into -// a single TestCheckFunc. -// -// As a user testing their provider, this lets you decompose your checks -// into smaller pieces more easily. -func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { - return func(s *terraform.State) error { - for i, f := range fs { - if err := f(s); err != nil { - return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err) - } - } - - return nil - } -} - -// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into -// a single TestCheckFunc. -// -// As a user testing their provider, this lets you decompose your checks -// into smaller pieces more easily. -// -// Unlike ComposeTestCheckFunc, ComposeAggergateTestCheckFunc runs _all_ of the -// TestCheckFuncs and aggregates failures. 
-func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { - return func(s *terraform.State) error { - var result *multierror.Error - - for i, f := range fs { - if err := f(s); err != nil { - result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)) - } - } - - return result.ErrorOrNil() - } -} - -// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value -// exists in state for the given name/key combination. It is useful when -// testing that computed values were set, when it is not possible to -// know ahead of time what the values will be. -func TestCheckResourceAttrSet(name, key string) TestCheckFunc { - return func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - return testCheckResourceAttrSet(is, name, key) - } -} - -// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with -// support for non-root modules -func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { - mpt := addrs.Module(mp).UnkeyedInstanceShim() - return func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mpt, name) - if err != nil { - return err - } - - return testCheckResourceAttrSet(is, name, key) - } -} - -func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error { - if val, ok := is.Attributes[key]; !ok || val == "" { - return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) - } - - return nil -} - -// TestCheckResourceAttr is a TestCheckFunc which validates -// the value in state for the given name/key combination. 
-func TestCheckResourceAttr(name, key, value string) TestCheckFunc { - return func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - return testCheckResourceAttr(is, name, key, value) - } -} - -// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with -// support for non-root modules -func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { - mpt := addrs.Module(mp).UnkeyedInstanceShim() - return func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mpt, name) - if err != nil { - return err - } - - return testCheckResourceAttr(is, name, key, value) - } -} - -func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { - // Empty containers may be elided from the state. - // If the intent here is to check for an empty container, allow the key to - // also be non-existent. - emptyCheck := false - if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { - emptyCheck = true - } - - if v, ok := is.Attributes[key]; !ok || v != value { - if emptyCheck && !ok { - return nil - } - - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", name, key) - } - - return fmt.Errorf( - "%s: Attribute '%s' expected %#v, got %#v", - name, - key, - value, - v) - } - return nil -} - -// TestCheckNoResourceAttr is a TestCheckFunc which ensures that -// NO value exists in state for the given name/key combination. 
-func TestCheckNoResourceAttr(name, key string) TestCheckFunc { - return func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - return testCheckNoResourceAttr(is, name, key) - } -} - -// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with -// support for non-root modules -func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { - mpt := addrs.Module(mp).UnkeyedInstanceShim() - return func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mpt, name) - if err != nil { - return err - } - - return testCheckNoResourceAttr(is, name, key) - } -} - -func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { - // Empty containers may sometimes be included in the state. - // If the intent here is to check for an empty container, allow the value to - // also be "0". - emptyCheck := false - if strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%") { - emptyCheck = true - } - - val, exists := is.Attributes[key] - if emptyCheck && val == "0" { - return nil - } - - if exists { - return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) - } - - return nil -} - -// TestMatchResourceAttr is a TestCheckFunc which checks that the value -// in state for the given name/key combination matches the given regex. 
-func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { - return func(s *terraform.State) error { - is, err := primaryInstanceState(s, name) - if err != nil { - return err - } - - return testMatchResourceAttr(is, name, key, r) - } -} - -// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with -// support for non-root modules -func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { - mpt := addrs.Module(mp).UnkeyedInstanceShim() - return func(s *terraform.State) error { - is, err := modulePathPrimaryInstanceState(s, mpt, name) - if err != nil { - return err - } - - return testMatchResourceAttr(is, name, key, r) - } -} - -func testMatchResourceAttr(is *terraform.InstanceState, name string, key string, r *regexp.Regexp) error { - if !r.MatchString(is.Attributes[key]) { - return fmt.Errorf( - "%s: Attribute '%s' didn't match %q, got %#v", - name, - key, - r.String(), - is.Attributes[key]) - } - - return nil -} - -// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the -// value is a pointer so that it can be updated while the test is running. -// It will only be dereferenced at the point this step is run. -func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc { - return func(s *terraform.State) error { - return TestCheckResourceAttr(name, key, *value)(s) - } -} - -// TestCheckModuleResourceAttrPtr - as per TestCheckResourceAttrPtr but with -// support for non-root modules -func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value *string) TestCheckFunc { - return func(s *terraform.State) error { - return TestCheckModuleResourceAttr(mp, name, key, *value)(s) - } -} - -// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values -// in state for a pair of name/key combinations are equal. 
-func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { - return func(s *terraform.State) error { - isFirst, err := primaryInstanceState(s, nameFirst) - if err != nil { - return err - } - - isSecond, err := primaryInstanceState(s, nameSecond) - if err != nil { - return err - } - - return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) - } -} - -// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with -// support for non-root modules -func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { - mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim() - mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim() - return func(s *terraform.State) error { - isFirst, err := modulePathPrimaryInstanceState(s, mptFirst, nameFirst) - if err != nil { - return err - } - - isSecond, err := modulePathPrimaryInstanceState(s, mptSecond, nameSecond) - if err != nil { - return err - } - - return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) - } -} - -func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { - vFirst, okFirst := isFirst.Attributes[keyFirst] - vSecond, okSecond := isSecond.Attributes[keySecond] - - // Container count values of 0 should not be relied upon, and not reliably - // maintained by helper/schema. For the purpose of tests, consider unset and - // 0 to be equal. - if len(keyFirst) > 2 && len(keySecond) > 2 && keyFirst[len(keyFirst)-2:] == keySecond[len(keySecond)-2:] && - (strings.HasSuffix(keyFirst, ".#") || strings.HasSuffix(keyFirst, ".%")) { - // they have the same suffix, and it is a collection count key. 
- if vFirst == "0" || vFirst == "" { - okFirst = false - } - if vSecond == "0" || vSecond == "" { - okSecond = false - } - } - - if okFirst != okSecond { - if !okFirst { - return fmt.Errorf("%s: Attribute %q not set, but %q is set in %s as %q", nameFirst, keyFirst, keySecond, nameSecond, vSecond) - } - return fmt.Errorf("%s: Attribute %q is %q, but %q is not set in %s", nameFirst, keyFirst, vFirst, keySecond, nameSecond) - } - if !(okFirst || okSecond) { - // If they both don't exist then they are equally unset, so that's okay. - return nil - } - - if vFirst != vSecond { - return fmt.Errorf( - "%s: Attribute '%s' expected %#v, got %#v", - nameFirst, - keyFirst, - vSecond, - vFirst) - } - - return nil -} - -// TestCheckOutput checks an output in the Terraform configuration -func TestCheckOutput(name, value string) TestCheckFunc { - return func(s *terraform.State) error { - ms := s.RootModule() - rs, ok := ms.Outputs[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Value != value { - return fmt.Errorf( - "Output '%s': expected %#v, got %#v", - name, - value, - rs) - } - - return nil - } -} - -func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc { - return func(s *terraform.State) error { - ms := s.RootModule() - rs, ok := ms.Outputs[name] - if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if !r.MatchString(rs.Value.(string)) { - return fmt.Errorf( - "Output '%s': %#v didn't match %q", - name, - rs, - r.String()) - } - - return nil - } -} - -// TestT is the interface used to handle the test lifecycle of a test. -// -// Users should just use a *testing.T object, which implements this. 
-type TestT interface { - Error(args ...interface{}) - Fatal(args ...interface{}) - Skip(args ...interface{}) - Name() string - Parallel() -} - -// This is set to true by unit tests to alter some behavior -var testTesting = false - -// modulePrimaryInstanceState returns the instance state for the given resource -// name in a ModuleState -func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { - rs, ok := ms.Resources[name] - if !ok { - return nil, fmt.Errorf("Not found: %s in %s", name, ms.Path) - } - - is := rs.Primary - if is == nil { - return nil, fmt.Errorf("No primary instance: %s in %s", name, ms.Path) - } - - return is, nil -} - -// modulePathPrimaryInstanceState returns the primary instance state for the -// given resource name in a given module path. -func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, name string) (*terraform.InstanceState, error) { - ms := s.ModuleByPath(mp) - if ms == nil { - return nil, fmt.Errorf("No module found at: %s", mp) - } - - return modulePrimaryInstanceState(s, ms, name) -} - -// primaryInstanceState returns the primary instance state for the given -// resource name in the root module. -func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { - ms := s.RootModule() - return modulePrimaryInstanceState(s, ms, name) -} - -// operationError is a specialized implementation of error used to describe -// failures during one of the several operations performed for a particular -// test case. -type operationError struct { - OpName string - Diags tfdiags.Diagnostics -} - -func newOperationError(opName string, diags tfdiags.Diagnostics) error { - return operationError{opName, diags} -} - -// Error returns a terse error string containing just the basic diagnostic -// messages, for situations where normal Go error behavior is appropriate. 
-func (err operationError) Error() string { - return fmt.Sprintf("errors during %s: %s", err.OpName, err.Diags.Err().Error()) -} - -// ErrorDetail is like Error except it includes verbosely-rendered diagnostics -// similar to what would come from a normal Terraform run, which include -// additional context not included in Error(). -func (err operationError) ErrorDetail() string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "errors during %s:", err.OpName) - clr := &colorstring.Colorize{Disable: true, Colors: colorstring.DefaultColors} - for _, diag := range err.Diags { - diagStr := format.Diagnostic(diag, nil, clr, 78) - buf.WriteByte('\n') - buf.WriteString(diagStr) - } - return buf.String() -} - -// detailedErrorMessage is a helper for calling ErrorDetail on an error if -// it is an operationError or just taking Error otherwise. -func detailedErrorMessage(err error) string { - switch tErr := err.(type) { - case operationError: - return tErr.ErrorDetail() - default: - return err.Error() - } -} diff --git a/helper/resource/testing_config.go b/helper/resource/testing_config.go deleted file mode 100644 index 74739c8a0..000000000 --- a/helper/resource/testing_config.go +++ /dev/null @@ -1,378 +0,0 @@ -package resource - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "log" - "sort" - "strings" - - "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/states" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/terraform" - "github.com/hashicorp/terraform/tfdiags" -) - -// testStepConfig runs a config-mode test step -func testStepConfig( - opts terraform.ContextOpts, - state *terraform.State, - step TestStep) (*terraform.State, error) { - return testStep(opts, state, step) -} - -func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep) (*terraform.State, error) { - if !step.Destroy { - if err := testStepTaint(state, step); err != nil { - return state, err - } 
- } - - cfg, err := testConfig(opts, step) - if err != nil { - return state, err - } - - var stepDiags tfdiags.Diagnostics - - // Build the context - opts.Config = cfg - opts.State, err = shimLegacyState(state) - if err != nil { - return nil, err - } - - opts.Destroy = step.Destroy - ctx, stepDiags := terraform.NewContext(&opts) - if stepDiags.HasErrors() { - return state, fmt.Errorf("Error initializing context: %s", stepDiags.Err()) - } - if stepDiags := ctx.Validate(); len(stepDiags) > 0 { - if stepDiags.HasErrors() { - return state, errwrap.Wrapf("config is invalid: {{err}}", stepDiags.Err()) - } - - log.Printf("[WARN] Config warnings:\n%s", stepDiags) - } - - // If this step is a PlanOnly step, skip over this first Plan and subsequent - // Apply, and use the follow up Plan that checks for perpetual diffs - if !step.PlanOnly { - // Plan! - p, stepDiags := ctx.Plan() - if stepDiags.HasErrors() { - return state, newOperationError("plan", stepDiags) - } - - newState := p.State - log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes)) - - // We need to keep a copy of the state prior to destroying - // such that destroy steps can verify their behavior in the check - // function - stateBeforeApplication := state.DeepCopy() - - // Apply the diff, creating real resources. 
- newState, stepDiags = ctx.Apply() - // shim the state first so the test can check the state on errors - state, err = shimNewState(newState, step.providers) - if err != nil { - return nil, err - } - if stepDiags.HasErrors() { - return state, newOperationError("apply", stepDiags) - } - - // Run any configured checks - if step.Check != nil { - if step.Destroy { - if err := step.Check(stateBeforeApplication); err != nil { - return state, fmt.Errorf("Check failed: %s", err) - } - } else { - if err := step.Check(state); err != nil { - return state, fmt.Errorf("Check failed: %s", err) - } - } - } - } - - // Now, verify that Plan is now empty and we don't have a perpetual diff issue - // We do this with TWO plans. One without a refresh. - p, stepDiags := ctx.Plan() - if stepDiags.HasErrors() { - return state, newOperationError("follow-up plan", stepDiags) - } - - // we don't technically need this any longer with plan handling refreshing, - // but run it anyway to ensure the context is working as expected. 
- p, stepDiags = ctx.Plan() - if stepDiags.HasErrors() { - return state, newOperationError("second follow-up plan", stepDiags) - } - empty := true - newState := p.State - - // the legacy tests never took outputs into account - for _, c := range p.Changes.Resources { - if c.Action != plans.NoOp { - empty = false - break - } - } - - if !empty { - if step.ExpectNonEmptyPlan { - log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } else { - return state, fmt.Errorf( - "After applying this step, the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } - } - - if !empty { - if step.ExpectNonEmptyPlan { - log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } else { - return state, fmt.Errorf( - "After applying this step and refreshing, "+ - "the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) - } - } - - // Made it here, but expected a non-empty plan, fail! - if step.ExpectNonEmptyPlan && empty { - return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!") - } - - // Made it here? Good job test step! - return state, nil -} - -// legacyPlanComparisonString produces a string representation of the changes -// from a plan and a given state togther, as was formerly produced by the -// String method of terraform.Plan. -// -// This is here only for compatibility with existing tests that predate our -// new plan and state types, and should not be used in new tests. Instead, use -// a library like "cmp" to do a deep equality and diff on the two -// data structures. 
-func legacyPlanComparisonString(state *states.State, changes *plans.Changes) string { - return fmt.Sprintf( - "DIFF:\n\n%s\n\nSTATE:\n\n%s", - legacyDiffComparisonString(changes), - state.String(), - ) -} - -// legacyDiffComparisonString produces a string representation of the changes -// from a planned changes object, as was formerly produced by the String method -// of terraform.Diff. -// -// This is here only for compatibility with existing tests that predate our -// new plan types, and should not be used in new tests. Instead, use a library -// like "cmp" to do a deep equality check and diff on the two data structures. -func legacyDiffComparisonString(changes *plans.Changes) string { - // The old string representation of a plan was grouped by module, but - // our new plan structure is not grouped in that way and so we'll need - // to preprocess it in order to produce that grouping. - type ResourceChanges struct { - Current *plans.ResourceInstanceChangeSrc - Deposed map[states.DeposedKey]*plans.ResourceInstanceChangeSrc - } - byModule := map[string]map[string]*ResourceChanges{} - resourceKeys := map[string][]string{} - requiresReplace := map[string][]string{} - var moduleKeys []string - for _, rc := range changes.Resources { - if rc.Action == plans.NoOp { - // We won't mention no-op changes here at all, since the old plan - // model we are emulating here didn't have such a concept. 
- continue - } - moduleKey := rc.Addr.Module.String() - if _, exists := byModule[moduleKey]; !exists { - moduleKeys = append(moduleKeys, moduleKey) - byModule[moduleKey] = make(map[string]*ResourceChanges) - } - resourceKey := rc.Addr.Resource.String() - if _, exists := byModule[moduleKey][resourceKey]; !exists { - resourceKeys[moduleKey] = append(resourceKeys[moduleKey], resourceKey) - byModule[moduleKey][resourceKey] = &ResourceChanges{ - Deposed: make(map[states.DeposedKey]*plans.ResourceInstanceChangeSrc), - } - } - - if rc.DeposedKey == states.NotDeposed { - byModule[moduleKey][resourceKey].Current = rc - } else { - byModule[moduleKey][resourceKey].Deposed[rc.DeposedKey] = rc - } - - rr := []string{} - for _, p := range rc.RequiredReplace.List() { - rr = append(rr, hcl2shim.FlatmapKeyFromPath(p)) - } - requiresReplace[resourceKey] = rr - } - sort.Strings(moduleKeys) - for _, ks := range resourceKeys { - sort.Strings(ks) - } - - var buf bytes.Buffer - - for _, moduleKey := range moduleKeys { - rcs := byModule[moduleKey] - var mBuf bytes.Buffer - - for _, resourceKey := range resourceKeys[moduleKey] { - rc := rcs[resourceKey] - - forceNewAttrs := requiresReplace[resourceKey] - - crud := "UPDATE" - if rc.Current != nil { - switch rc.Current.Action { - case plans.DeleteThenCreate: - crud = "DESTROY/CREATE" - case plans.CreateThenDelete: - crud = "CREATE/DESTROY" - case plans.Delete: - crud = "DESTROY" - case plans.Create: - crud = "CREATE" - } - } else { - // We must be working on a deposed object then, in which - // case destroying is the only possible action. 
- crud = "DESTROY" - } - - extra := "" - if rc.Current == nil && len(rc.Deposed) > 0 { - extra = " (deposed only)" - } - - fmt.Fprintf( - &mBuf, "%s: %s%s\n", - crud, resourceKey, extra, - ) - - attrNames := map[string]bool{} - var oldAttrs map[string]string - var newAttrs map[string]string - if rc.Current != nil { - if before := rc.Current.Before; before != nil { - ty, err := before.ImpliedType() - if err == nil { - val, err := before.Decode(ty) - if err == nil { - oldAttrs = hcl2shim.FlatmapValueFromHCL2(val) - for k := range oldAttrs { - attrNames[k] = true - } - } - } - } - if after := rc.Current.After; after != nil { - ty, err := after.ImpliedType() - if err == nil { - val, err := after.Decode(ty) - if err == nil { - newAttrs = hcl2shim.FlatmapValueFromHCL2(val) - for k := range newAttrs { - attrNames[k] = true - } - } - } - } - } - if oldAttrs == nil { - oldAttrs = make(map[string]string) - } - if newAttrs == nil { - newAttrs = make(map[string]string) - } - - attrNamesOrder := make([]string, 0, len(attrNames)) - keyLen := 0 - for n := range attrNames { - attrNamesOrder = append(attrNamesOrder, n) - if len(n) > keyLen { - keyLen = len(n) - } - } - sort.Strings(attrNamesOrder) - - for _, attrK := range attrNamesOrder { - v := newAttrs[attrK] - u := oldAttrs[attrK] - - if v == hcl2shim.UnknownVariableValue { - v = "" - } - // NOTE: we don't support here because we would - // need schema to do that. Excluding sensitive values - // is now done at the UI layer, and so should not be tested - // at the core layer. - - updateMsg := "" - - // This may not be as precise as in the old diff, as it matches - // everything under the attribute that was originally marked as - // ForceNew, but should help make it easier to determine what - // caused replacement here. 
- for _, k := range forceNewAttrs { - if strings.HasPrefix(attrK, k) { - updateMsg = " (forces new resource)" - break - } - } - - fmt.Fprintf( - &mBuf, " %s:%s %#v => %#v%s\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - u, v, - updateMsg, - ) - } - } - - if moduleKey == "" { // root module - buf.Write(mBuf.Bytes()) - buf.WriteByte('\n') - continue - } - - fmt.Fprintf(&buf, "%s:\n", moduleKey) - s := bufio.NewScanner(&mBuf) - for s.Scan() { - buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) - } - } - - return buf.String() -} - -func testStepTaint(state *terraform.State, step TestStep) error { - for _, p := range step.Taint { - m := state.RootModule() - if m == nil { - return errors.New("no state") - } - rs, ok := m.Resources[p] - if !ok { - return fmt.Errorf("resource %q not found in state", p) - } - log.Printf("[WARN] Test: Explicitly tainting resource %q", p) - rs.Taint() - } - return nil -} diff --git a/helper/resource/testing_import_state.go b/helper/resource/testing_import_state.go deleted file mode 100644 index e163770e8..000000000 --- a/helper/resource/testing_import_state.go +++ /dev/null @@ -1,230 +0,0 @@ -package resource - -import ( - "fmt" - "log" - "reflect" - "strings" - - "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/hclsyntax" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/terraform" -) - -// testStepImportState runs an import state test step -func testStepImportState( - opts terraform.ContextOpts, - state *terraform.State, - step TestStep) (*terraform.State, error) { - - // Determine the ID to import - var importId string - switch { - case step.ImportStateIdFunc != nil: - var err error - importId, err = step.ImportStateIdFunc(state) - if err != nil { - return state, err - } - case step.ImportStateId != "": - importId = step.ImportStateId - default: - resource, err := 
testResource(step, state) - if err != nil { - return state, err - } - importId = resource.Primary.ID - } - - importPrefix := step.ImportStateIdPrefix - if importPrefix != "" { - importId = fmt.Sprintf("%s%s", importPrefix, importId) - } - - // Setup the context. We initialize with an empty state. We use the - // full config for provider configurations. - cfg, err := testConfig(opts, step) - if err != nil { - return state, err - } - - opts.Config = cfg - - // import tests start with empty state - opts.State = states.NewState() - - ctx, stepDiags := terraform.NewContext(&opts) - if stepDiags.HasErrors() { - return state, stepDiags.Err() - } - - // The test step provides the resource address as a string, so we need - // to parse it to get an addrs.AbsResourceAddress to pass in to the - // import method. - traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(step.ResourceName), "", hcl.Pos{}) - if hclDiags.HasErrors() { - return nil, hclDiags - } - importAddr, stepDiags := addrs.ParseAbsResourceInstance(traversal) - if stepDiags.HasErrors() { - return nil, stepDiags.Err() - } - - // Do the import - importedState, stepDiags := ctx.Import(&terraform.ImportOpts{ - Targets: []*terraform.ImportTarget{ - &terraform.ImportTarget{ - Addr: importAddr, - ID: importId, - }, - }, - }) - if stepDiags.HasErrors() { - log.Printf("[ERROR] Test: ImportState failure: %s", stepDiags.Err()) - return state, stepDiags.Err() - } - - newState, err := shimNewState(importedState, step.providers) - if err != nil { - return nil, err - } - // Go through the new state and verify - if step.ImportStateCheck != nil { - var states []*terraform.InstanceState - for _, r := range newState.RootModule().Resources { - if r.Primary != nil { - is := r.Primary.DeepCopy() - is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type - states = append(states, is) - } - } - if err := step.ImportStateCheck(states); err != nil { - return state, err - } - } - - // Verify that all the states 
match - if step.ImportStateVerify { - new := newState.RootModule().Resources - old := state.RootModule().Resources - for _, r := range new { - // Find the existing resource - var oldR *terraform.ResourceState - for _, r2 := range old { - if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type { - oldR = r2 - break - } - } - if oldR == nil { - return state, fmt.Errorf( - "Failed state verification, resource with ID %s not found", - r.Primary.ID) - } - - // We'll try our best to find the schema for this resource type - // so we can ignore Removed fields during validation. If we fail - // to find the schema then we won't ignore them and so the test - // will need to rely on explicit ImportStateVerifyIgnore, though - // this shouldn't happen in any reasonable case. - var rsrcSchema *schema.Resource - if providerAddr, diags := addrs.ParseAbsProviderConfigStr(r.Provider); !diags.HasErrors() { - // FIXME - providerType := providerAddr.Provider.Type - if provider, ok := step.providers[providerType]; ok { - if provider, ok := provider.(*schema.Provider); ok { - rsrcSchema = provider.ResourcesMap[r.Type] - } - } - } - - // don't add empty flatmapped containers, so we can more easily - // compare the attributes - skipEmpty := func(k, v string) bool { - if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") { - if v == "0" { - return true - } - } - return false - } - - // Compare their attributes - actual := make(map[string]string) - for k, v := range r.Primary.Attributes { - if skipEmpty(k, v) { - continue - } - actual[k] = v - } - - expected := make(map[string]string) - for k, v := range oldR.Primary.Attributes { - if skipEmpty(k, v) { - continue - } - expected[k] = v - } - - // Remove fields we're ignoring - for _, v := range step.ImportStateVerifyIgnore { - for k := range actual { - if strings.HasPrefix(k, v) { - delete(actual, k) - } - } - for k := range expected { - if strings.HasPrefix(k, v) { - delete(expected, k) - } - } - } - - // Also remove 
any attributes that are marked as "Removed" in the - // schema, if we have a schema to check that against. - if rsrcSchema != nil { - for k := range actual { - for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { - if schema.Removed != "" { - delete(actual, k) - break - } - } - } - for k := range expected { - for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { - if schema.Removed != "" { - delete(expected, k) - break - } - } - } - } - - if !reflect.DeepEqual(actual, expected) { - // Determine only the different attributes - for k, v := range expected { - if av, ok := actual[k]; ok && v == av { - delete(expected, k) - delete(actual, k) - } - } - - spewConf := spew.NewDefaultConfig() - spewConf.SortKeys = true - return state, fmt.Errorf( - "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ - "\n\n%s\n\n%s", - spewConf.Sdump(actual), spewConf.Sdump(expected)) - } - } - } - - // Return the old state (non-imported) so we don't change anything. 
- return state, nil -} diff --git a/helper/resource/testing_import_state_test.go b/helper/resource/testing_import_state_test.go deleted file mode 100644 index 9b2acc3c9..000000000 --- a/helper/resource/testing_import_state_test.go +++ /dev/null @@ -1,517 +0,0 @@ -package resource - -import ( - "errors" - "fmt" - "testing" - - "github.com/hashicorp/terraform/terraform" -) - -func TestTest_importState(t *testing.T) { - t.Skip("test requires new provider implementation") - - mp := testProvider() - mp.ImportStateReturn = []*terraform.InstanceState{ - &terraform.InstanceState{ - ID: "foo", - Ephemeral: terraform.EphemeralState{Type: "test_instance"}, - }, - } - mp.RefreshFn = func( - i *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - return s, nil - } - - checked := false - checkFn := func(s []*terraform.InstanceState) error { - checked = true - - if s[0].ID != "foo" { - return fmt.Errorf("bad: %#v", s) - } - - return nil - } - - mt := new(mockT) - Test(mt, TestCase{ - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - - Steps: []TestStep{ - TestStep{ - Config: testConfigStrProvider, - ResourceName: "test_instance.foo", - ImportState: true, - ImportStateId: "foo", - ImportStateCheck: checkFn, - }, - }, - }) - - if mt.failed() { - t.Fatalf("test failed: %s", mt.failMessage()) - } - if !checked { - t.Fatal("didn't call check") - } -} - -func TestTest_importStateFail(t *testing.T) { - t.Skip("test requires new provider implementation") - - mp := testProvider() - mp.ImportStateReturn = []*terraform.InstanceState{ - &terraform.InstanceState{ - ID: "bar", - Ephemeral: terraform.EphemeralState{Type: "test_instance"}, - }, - } - mp.RefreshFn = func( - i *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - return s, nil - } - - checked := false - checkFn := func(s []*terraform.InstanceState) error { - checked = true - - if s[0].ID != "foo" { - return fmt.Errorf("bad: %#v", s) 
- } - - return nil - } - - mt := new(mockT) - Test(mt, TestCase{ - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - - Steps: []TestStep{ - TestStep{ - Config: testConfigStrProvider, - ResourceName: "test_instance.foo", - ImportState: true, - ImportStateId: "foo", - ImportStateCheck: checkFn, - }, - }, - }) - - if !mt.failed() { - t.Fatal("should fail") - } - if !checked { - t.Fatal("didn't call check") - } -} - -func TestTest_importStateDetectId(t *testing.T) { - t.Skip("test requires new provider implementation") - - mp := testProvider() - mp.DiffReturn = nil - mp.ApplyFn = func( - info *terraform.InstanceInfo, - state *terraform.InstanceState, - diff *terraform.InstanceDiff) (*terraform.InstanceState, error) { - if !diff.Destroy { - return &terraform.InstanceState{ - ID: "foo", - }, nil - } - - return nil, nil - } - - mp.RefreshFn = func( - i *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - return s, nil - } - - mp.ImportStateFn = func( - info *terraform.InstanceInfo, id string) ([]*terraform.InstanceState, error) { - if id != "foo" { - return nil, fmt.Errorf("bad import ID: %s", id) - } - - return []*terraform.InstanceState{ - &terraform.InstanceState{ - ID: "bar", - Ephemeral: terraform.EphemeralState{Type: "test_instance"}, - }, - }, nil - } - - checked := false - checkFn := func(s []*terraform.InstanceState) error { - checked = true - - if s[0].ID != "bar" { - return fmt.Errorf("bad: %#v", s) - } - - return nil - } - - mt := new(mockT) - Test(mt, TestCase{ - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - - Steps: []TestStep{ - TestStep{ - Config: testConfigStr, - }, - TestStep{ - Config: testConfigStr, - ResourceName: "test_instance.foo", - ImportState: true, - ImportStateCheck: checkFn, - }, - }, - }) - - if mt.failed() { - t.Fatalf("test failed: %s", mt.failMessage()) - } - if !checked { - t.Fatal("didn't call check") - } -} - -func TestTest_importStateIdPrefix(t 
*testing.T) { - t.Skip("test requires new provider implementation") - - mp := testProvider() - mp.DiffReturn = nil - mp.ApplyFn = func( - info *terraform.InstanceInfo, - state *terraform.InstanceState, - diff *terraform.InstanceDiff) (*terraform.InstanceState, error) { - if !diff.Destroy { - return &terraform.InstanceState{ - ID: "foo", - }, nil - } - - return nil, nil - } - - mp.RefreshFn = func( - i *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - return s, nil - } - - mp.ImportStateFn = func( - info *terraform.InstanceInfo, id string) ([]*terraform.InstanceState, error) { - if id != "bazfoo" { - return nil, fmt.Errorf("bad import ID: %s", id) - } - - return []*terraform.InstanceState{ - { - ID: "bar", - Ephemeral: terraform.EphemeralState{Type: "test_instance"}, - }, - }, nil - } - - checked := false - checkFn := func(s []*terraform.InstanceState) error { - checked = true - - if s[0].ID != "bar" { - return fmt.Errorf("bad: %#v", s) - } - - return nil - } - - mt := new(mockT) - Test(mt, TestCase{ - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - - Steps: []TestStep{ - { - Config: testConfigStr, - }, - { - Config: testConfigStr, - ResourceName: "test_instance.foo", - ImportState: true, - ImportStateCheck: checkFn, - ImportStateIdPrefix: "baz", - }, - }, - }) - - if mt.failed() { - t.Fatalf("test failed: %s", mt.failMessage()) - } - if !checked { - t.Fatal("didn't call check") - } -} - -func TestTest_importStateVerify(t *testing.T) { - t.Skip("test requires new provider implementation") - - mp := testProvider() - mp.DiffReturn = nil - mp.ApplyFn = func( - info *terraform.InstanceInfo, - state *terraform.InstanceState, - diff *terraform.InstanceDiff) (*terraform.InstanceState, error) { - if !diff.Destroy { - return &terraform.InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "foo": "bar", - }, - }, nil - } - - return nil, nil - } - - mp.RefreshFn = func( - i *terraform.InstanceInfo, - s 
*terraform.InstanceState) (*terraform.InstanceState, error) { - if len(s.Attributes) == 0 { - s.Attributes = map[string]string{ - "id": s.ID, - "foo": "bar", - } - } - - return s, nil - } - - mp.ImportStateFn = func( - info *terraform.InstanceInfo, id string) ([]*terraform.InstanceState, error) { - if id != "foo" { - return nil, fmt.Errorf("bad import ID: %s", id) - } - - return []*terraform.InstanceState{ - &terraform.InstanceState{ - ID: "foo", - Ephemeral: terraform.EphemeralState{Type: "test_instance"}, - }, - }, nil - } - - mt := new(mockT) - Test(mt, TestCase{ - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - - Steps: []TestStep{ - TestStep{ - Config: testConfigStr, - }, - TestStep{ - Config: testConfigStr, - ResourceName: "test_instance.foo", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) - - if mt.failed() { - t.Fatalf("test failed: %s", mt.failMessage()) - } -} - -func TestTest_importStateVerifyFail(t *testing.T) { - t.Skip("test requires new provider implementation") - - mp := testProvider() - mp.DiffReturn = nil - mp.ApplyFn = func( - info *terraform.InstanceInfo, - state *terraform.InstanceState, - diff *terraform.InstanceDiff) (*terraform.InstanceState, error) { - if !diff.Destroy { - return &terraform.InstanceState{ - ID: "foo", - Attributes: map[string]string{ - "foo": "bar", - }, - }, nil - } - - return nil, nil - } - - mp.RefreshFn = func( - i *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - return s, nil - } - - mp.ImportStateFn = func( - info *terraform.InstanceInfo, id string) ([]*terraform.InstanceState, error) { - if id != "foo" { - return nil, fmt.Errorf("bad import ID: %s", id) - } - - return []*terraform.InstanceState{ - &terraform.InstanceState{ - ID: "foo", - Ephemeral: terraform.EphemeralState{Type: "test_instance"}, - }, - }, nil - } - - mt := new(mockT) - Test(mt, TestCase{ - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - - Steps: 
[]TestStep{ - TestStep{ - Config: testConfigStr, - }, - TestStep{ - Config: testConfigStr, - ResourceName: "test_instance.foo", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) - - if !mt.failed() { - t.Fatalf("test should fail") - } -} - -func TestTest_importStateIdFunc(t *testing.T) { - t.Skip("test requires new provider implementation") - - mp := testProvider() - mp.ImportStateFn = func( - info *terraform.InstanceInfo, id string) ([]*terraform.InstanceState, error) { - if id != "foo:bar" { - return nil, fmt.Errorf("bad import ID: %s", id) - } - - return []*terraform.InstanceState{ - { - ID: "foo", - Ephemeral: terraform.EphemeralState{Type: "test_instance"}, - }, - }, nil - } - - mp.RefreshFn = func( - i *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - return s, nil - } - - checked := false - checkFn := func(s []*terraform.InstanceState) error { - checked = true - - if s[0].ID != "foo" { - return fmt.Errorf("bad: %#v", s) - } - - return nil - } - - mt := new(mockT) - Test(mt, TestCase{ - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - - Steps: []TestStep{ - TestStep{ - Config: testConfigStrProvider, - ResourceName: "test_instance.foo", - ImportState: true, - ImportStateIdFunc: func(*terraform.State) (string, error) { return "foo:bar", nil }, - ImportStateCheck: checkFn, - }, - }, - }) - - if mt.failed() { - t.Fatalf("test failed: %s", mt.failMessage()) - } - if !checked { - t.Fatal("didn't call check") - } -} - -func TestTest_importStateIdFuncFail(t *testing.T) { - t.Skip("test requires new provider implementation") - - mp := testProvider() - mp.ImportStateFn = func( - info *terraform.InstanceInfo, id string) ([]*terraform.InstanceState, error) { - if id != "foo:bar" { - return nil, fmt.Errorf("bad import ID: %s", id) - } - - return []*terraform.InstanceState{ - { - ID: "foo", - Ephemeral: terraform.EphemeralState{Type: "test_instance"}, - }, - }, nil - } - - mp.RefreshFn = func( - 
i *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - return s, nil - } - - checkFn := func(s []*terraform.InstanceState) error { - if s[0].ID != "foo" { - return fmt.Errorf("bad: %#v", s) - } - - return nil - } - - mt := new(mockT) - Test(mt, TestCase{ - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - - Steps: []TestStep{ - TestStep{ - Config: testConfigStrProvider, - ResourceName: "test_instance.foo", - ImportState: true, - ImportStateIdFunc: func(*terraform.State) (string, error) { return "foo:bar", errors.New("foobar") }, - ImportStateCheck: checkFn, - }, - }, - }) - - if !mt.failed() { - t.Fatalf("test should fail") - } -} diff --git a/helper/resource/testing_test.go b/helper/resource/testing_test.go deleted file mode 100644 index 5d4891778..000000000 --- a/helper/resource/testing_test.go +++ /dev/null @@ -1,1361 +0,0 @@ -package resource - -import ( - "errors" - "flag" - "fmt" - "os" - "reflect" - "regexp" - "sort" - "strings" - "sync" - "sync/atomic" - "testing" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/terraform" -) - -func init() { - testTesting = true - - // TODO: Remove when we remove the guard on id checks - if err := os.Setenv("TF_ACC_IDONLY", "1"); err != nil { - panic(err) - } - - if err := os.Setenv(TestEnvVar, "1"); err != nil { - panic(err) - } -} - -// wrap the mock provider to implement TestProvider -type resetProvider struct { - *terraform.MockResourceProvider - mu sync.Mutex - TestResetCalled bool - TestResetError error -} - -func (p *resetProvider) TestReset() error { - p.mu.Lock() - defer p.mu.Unlock() - p.TestResetCalled = true - return p.TestResetError -} - -func TestParallelTest(t *testing.T) { - mt := new(mockT) - ParallelTest(mt, TestCase{}) - - if !mt.ParallelCalled { - t.Fatal("Parallel() not called") - } -} - -func TestTest(t *testing.T) { - t.Skip("test requires new provider implementation") - - mp := &resetProvider{ - 
MockResourceProvider: testProvider(), - } - - mp.DiffReturn = nil - - mp.ApplyFn = func( - info *terraform.InstanceInfo, - state *terraform.InstanceState, - diff *terraform.InstanceDiff) (*terraform.InstanceState, error) { - if !diff.Destroy { - return &terraform.InstanceState{ - ID: "foo", - }, nil - } - - return nil, nil - } - - var refreshCount int32 - mp.RefreshFn = func(*terraform.InstanceInfo, *terraform.InstanceState) (*terraform.InstanceState, error) { - atomic.AddInt32(&refreshCount, 1) - return &terraform.InstanceState{ID: "foo"}, nil - } - - checkDestroy := false - checkStep := false - - checkDestroyFn := func(*terraform.State) error { - checkDestroy = true - return nil - } - - checkStepFn := func(s *terraform.State) error { - checkStep = true - - rs, ok := s.RootModule().Resources["test_instance.foo"] - if !ok { - t.Error("test_instance.foo is not present") - return nil - } - is := rs.Primary - if is.ID != "foo" { - t.Errorf("bad check ID: %s", is.ID) - } - - return nil - } - - mt := new(mockT) - Test(mt, TestCase{ - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - CheckDestroy: checkDestroyFn, - Steps: []TestStep{ - TestStep{ - Config: testConfigStr, - Check: checkStepFn, - }, - }, - }) - - if mt.failed() { - t.Fatalf("test failed: %s", mt.failMessage()) - } - if mt.ParallelCalled { - t.Fatal("Parallel() called") - } - if !checkStep { - t.Fatal("didn't call check for step") - } - if !checkDestroy { - t.Fatal("didn't call check for destroy") - } - if !mp.TestResetCalled { - t.Fatal("didn't call TestReset") - } -} - -func TestTest_plan_only(t *testing.T) { - t.Skip("test requires new provider implementation") - - mp := testProvider() - mp.ApplyReturn = &terraform.InstanceState{ - ID: "foo", - } - - checkDestroy := false - - checkDestroyFn := func(*terraform.State) error { - checkDestroy = true - return nil - } - - mt := new(mockT) - Test(mt, TestCase{ - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - 
CheckDestroy: checkDestroyFn, - Steps: []TestStep{ - TestStep{ - Config: testConfigStr, - PlanOnly: true, - ExpectNonEmptyPlan: false, - }, - }, - }) - - if !mt.failed() { - t.Fatal("test should've failed") - } - - expected := `Step 0 error: After applying this step, the plan was not empty: - -DIFF: - -CREATE: test_instance.foo - foo: "" => "bar" - -STATE: - -` - - if mt.failMessage() != expected { - t.Fatalf("Expected message: %s\n\ngot:\n\n%s", expected, mt.failMessage()) - } - - if !checkDestroy { - t.Fatal("didn't call check for destroy") - } -} - -func TestTest_idRefresh(t *testing.T) { - t.Skip("test requires new provider implementation") - - // Refresh count should be 3: - // 1.) initial Ref/Plan/Apply - // 2.) post Ref/Plan/Apply for plan-check - // 3.) id refresh check - var expectedRefresh int32 = 3 - - mp := testProvider() - mp.DiffReturn = nil - - mp.ApplyFn = func( - info *terraform.InstanceInfo, - state *terraform.InstanceState, - diff *terraform.InstanceDiff) (*terraform.InstanceState, error) { - if !diff.Destroy { - return &terraform.InstanceState{ - ID: "foo", - }, nil - } - - return nil, nil - } - - var refreshCount int32 - mp.RefreshFn = func(*terraform.InstanceInfo, *terraform.InstanceState) (*terraform.InstanceState, error) { - atomic.AddInt32(&refreshCount, 1) - return &terraform.InstanceState{ID: "foo"}, nil - } - - mt := new(mockT) - Test(mt, TestCase{ - IDRefreshName: "test_instance.foo", - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - Steps: []TestStep{ - TestStep{ - Config: testConfigStr, - }, - }, - }) - - if mt.failed() { - t.Fatalf("test failed: %s", mt.failMessage()) - } - - // See declaration of expectedRefresh for why that number - if refreshCount != expectedRefresh { - t.Fatalf("bad refresh count: %d", refreshCount) - } -} - -func TestTest_idRefreshCustomName(t *testing.T) { - t.Skip("test requires new provider implementation") - - // Refresh count should be 3: - // 1.) initial Ref/Plan/Apply - // 2.) 
post Ref/Plan/Apply for plan-check - // 3.) id refresh check - var expectedRefresh int32 = 3 - - mp := testProvider() - mp.DiffReturn = nil - - mp.ApplyFn = func( - info *terraform.InstanceInfo, - state *terraform.InstanceState, - diff *terraform.InstanceDiff) (*terraform.InstanceState, error) { - if !diff.Destroy { - return &terraform.InstanceState{ - ID: "foo", - }, nil - } - - return nil, nil - } - - var refreshCount int32 - mp.RefreshFn = func(*terraform.InstanceInfo, *terraform.InstanceState) (*terraform.InstanceState, error) { - atomic.AddInt32(&refreshCount, 1) - return &terraform.InstanceState{ID: "foo"}, nil - } - - mt := new(mockT) - Test(mt, TestCase{ - IDRefreshName: "test_instance.foo", - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - Steps: []TestStep{ - TestStep{ - Config: testConfigStr, - }, - }, - }) - - if mt.failed() { - t.Fatalf("test failed: %s", mt.failMessage()) - } - - // See declaration of expectedRefresh for why that number - if refreshCount != expectedRefresh { - t.Fatalf("bad refresh count: %d", refreshCount) - } -} - -func TestTest_idRefreshFail(t *testing.T) { - t.Skip("test requires new provider implementation") - - // Refresh count should be 3: - // 1.) initial Ref/Plan/Apply - // 2.) post Ref/Plan/Apply for plan-check - // 3.) 
id refresh check - var expectedRefresh int32 = 3 - - mp := testProvider() - mp.DiffReturn = nil - - mp.ApplyFn = func( - info *terraform.InstanceInfo, - state *terraform.InstanceState, - diff *terraform.InstanceDiff) (*terraform.InstanceState, error) { - if !diff.Destroy { - return &terraform.InstanceState{ - ID: "foo", - }, nil - } - - return nil, nil - } - - var refreshCount int32 - mp.RefreshFn = func(*terraform.InstanceInfo, *terraform.InstanceState) (*terraform.InstanceState, error) { - atomic.AddInt32(&refreshCount, 1) - if atomic.LoadInt32(&refreshCount) == expectedRefresh-1 { - return &terraform.InstanceState{ - ID: "foo", - Attributes: map[string]string{"foo": "bar"}, - }, nil - } else if atomic.LoadInt32(&refreshCount) < expectedRefresh { - return &terraform.InstanceState{ID: "foo"}, nil - } else { - return nil, nil - } - } - - mt := new(mockT) - Test(mt, TestCase{ - IDRefreshName: "test_instance.foo", - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - Steps: []TestStep{ - TestStep{ - Config: testConfigStr, - }, - }, - }) - - if !mt.failed() { - t.Fatal("test didn't fail") - } - t.Logf("failure reason: %s", mt.failMessage()) - - // See declaration of expectedRefresh for why that number - if refreshCount != expectedRefresh { - t.Fatalf("bad refresh count: %d", refreshCount) - } -} - -func TestTest_empty(t *testing.T) { - t.Skip("test requires new provider implementation") - - destroyCalled := false - checkDestroyFn := func(*terraform.State) error { - destroyCalled = true - return nil - } - - mt := new(mockT) - Test(mt, TestCase{ - CheckDestroy: checkDestroyFn, - }) - - if mt.failed() { - t.Fatal("test failed") - } - if destroyCalled { - t.Fatal("should not call check destroy if there is no steps") - } -} - -func TestTest_noEnv(t *testing.T) { - t.Skip("test requires new provider implementation") - - // Unset the variable - if err := os.Setenv(TestEnvVar, ""); err != nil { - t.Fatalf("err: %s", err) - } - defer os.Setenv(TestEnvVar, 
"1") - - mt := new(mockT) - Test(mt, TestCase{}) - - if !mt.SkipCalled { - t.Fatal("skip not called") - } -} - -func TestTest_preCheck(t *testing.T) { - t.Skip("test requires new provider implementation") - - called := false - - mt := new(mockT) - Test(mt, TestCase{ - PreCheck: func() { called = true }, - }) - - if !called { - t.Fatal("precheck should be called") - } -} - -func TestTest_skipFunc(t *testing.T) { - t.Skip("test requires new provider implementation") - - preCheckCalled := false - skipped := false - - mp := testProvider() - mp.ApplyReturn = &terraform.InstanceState{ - ID: "foo", - } - - checkStepFn := func(*terraform.State) error { - return fmt.Errorf("error") - } - - mt := new(mockT) - Test(mt, TestCase{ - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - PreCheck: func() { preCheckCalled = true }, - Steps: []TestStep{ - { - Config: testConfigStr, - Check: checkStepFn, - SkipFunc: func() (bool, error) { skipped = true; return true, nil }, - }, - }, - }) - - if mt.failed() { - t.Fatal("Expected check to be skipped") - } - - if !preCheckCalled { - t.Fatal("precheck should be called") - } - if !skipped { - t.Fatal("SkipFunc should be called") - } -} - -func TestTest_stepError(t *testing.T) { - t.Skip("test requires new provider implementation") - - mp := testProvider() - mp.ApplyReturn = &terraform.InstanceState{ - ID: "foo", - } - - checkDestroy := false - - checkDestroyFn := func(*terraform.State) error { - checkDestroy = true - return nil - } - - checkStepFn := func(*terraform.State) error { - return fmt.Errorf("error") - } - - mt := new(mockT) - Test(mt, TestCase{ - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - CheckDestroy: checkDestroyFn, - Steps: []TestStep{ - TestStep{ - Config: testConfigStr, - Check: checkStepFn, - }, - }, - }) - - if !mt.failed() { - t.Fatal("test should've failed") - } - expected := "Step 0 error: Check failed: error" - if mt.failMessage() != expected { - t.Fatalf("Expected 
message: %s\n\ngot:\n\n%s", expected, mt.failMessage()) - } - - if !checkDestroy { - t.Fatal("didn't call check for destroy") - } -} - -func TestTest_factoryError(t *testing.T) { - resourceFactoryError := fmt.Errorf("resource factory error") - - factory := func() (terraform.ResourceProvider, error) { - return nil, resourceFactoryError - } - - mt := new(mockT) - Test(mt, TestCase{ - ProviderFactories: map[string]terraform.ResourceProviderFactory{ - "test": factory, - }, - Steps: []TestStep{ - TestStep{ - ExpectError: regexp.MustCompile("resource factory error"), - }, - }, - }) - - if !mt.failed() { - t.Fatal("test should've failed") - } -} - -func TestTest_resetError(t *testing.T) { - t.Skip("test requires new provider implementation") - - mp := &resetProvider{ - MockResourceProvider: testProvider(), - TestResetError: fmt.Errorf("provider reset error"), - } - - mt := new(mockT) - Test(mt, TestCase{ - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - Steps: []TestStep{ - TestStep{ - ExpectError: regexp.MustCompile("provider reset error"), - }, - }, - }) - - if !mt.failed() { - t.Fatal("test should've failed") - } -} - -func TestTest_expectError(t *testing.T) { - t.Skip("test requires new provider implementation") - - cases := []struct { - name string - planErr bool - applyErr bool - badErr bool - }{ - { - name: "successful apply", - planErr: false, - applyErr: false, - }, - { - name: "bad plan", - planErr: true, - applyErr: false, - }, - { - name: "bad apply", - planErr: false, - applyErr: true, - }, - { - name: "bad plan, bad err", - planErr: true, - applyErr: false, - badErr: true, - }, - { - name: "bad apply, bad err", - planErr: false, - applyErr: true, - badErr: true, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - mp := testProvider() - expectedText := "test provider error" - var errText string - if tc.badErr { - errText = "wrong provider error" - } else { - errText = expectedText - } - noErrText := "no error 
received, but expected a match to" - if tc.planErr { - mp.DiffReturnError = errors.New(errText) - } - if tc.applyErr { - mp.ApplyReturnError = errors.New(errText) - } - mt := new(mockT) - Test(mt, TestCase{ - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - Steps: []TestStep{ - TestStep{ - Config: testConfigStr, - ExpectError: regexp.MustCompile(expectedText), - Check: func(*terraform.State) error { return nil }, - ExpectNonEmptyPlan: true, - }, - }, - }, - ) - if mt.FatalCalled { - t.Fatalf("fatal: %+v", mt.FatalArgs) - } - switch { - case len(mt.ErrorArgs) < 1 && !tc.planErr && !tc.applyErr: - t.Fatalf("expected error, got none") - case !tc.planErr && !tc.applyErr: - for _, e := range mt.ErrorArgs { - if regexp.MustCompile(noErrText).MatchString(fmt.Sprintf("%v", e)) { - return - } - } - t.Fatalf("expected error to match %s, got %+v", noErrText, mt.ErrorArgs) - case tc.badErr: - for _, e := range mt.ErrorArgs { - if regexp.MustCompile(expectedText).MatchString(fmt.Sprintf("%v", e)) { - return - } - } - t.Fatalf("expected error to match %s, got %+v", expectedText, mt.ErrorArgs) - } - }) - } -} - -func TestComposeAggregateTestCheckFunc(t *testing.T) { - check1 := func(s *terraform.State) error { - return errors.New("Error 1") - } - - check2 := func(s *terraform.State) error { - return errors.New("Error 2") - } - - f := ComposeAggregateTestCheckFunc(check1, check2) - err := f(nil) - if err == nil { - t.Fatalf("Expected errors") - } - - multi := err.(*multierror.Error) - if !strings.Contains(multi.Errors[0].Error(), "Error 1") { - t.Fatalf("Expected Error 1, Got %s", multi.Errors[0]) - } - if !strings.Contains(multi.Errors[1].Error(), "Error 2") { - t.Fatalf("Expected Error 2, Got %s", multi.Errors[1]) - } -} - -func TestComposeTestCheckFunc(t *testing.T) { - cases := []struct { - F []TestCheckFunc - Result string - }{ - { - F: []TestCheckFunc{ - func(*terraform.State) error { return nil }, - }, - Result: "", - }, - - { - F: []TestCheckFunc{ - 
func(*terraform.State) error { - return fmt.Errorf("error") - }, - func(*terraform.State) error { return nil }, - }, - Result: "Check 1/2 error: error", - }, - - { - F: []TestCheckFunc{ - func(*terraform.State) error { return nil }, - func(*terraform.State) error { - return fmt.Errorf("error") - }, - }, - Result: "Check 2/2 error: error", - }, - - { - F: []TestCheckFunc{ - func(*terraform.State) error { return nil }, - func(*terraform.State) error { return nil }, - }, - Result: "", - }, - } - - for i, tc := range cases { - f := ComposeTestCheckFunc(tc.F...) - err := f(nil) - if err == nil { - err = fmt.Errorf("") - } - if tc.Result != err.Error() { - t.Fatalf("Case %d bad: %s", i, err) - } - } -} - -// mockT implements TestT for testing -type mockT struct { - ErrorCalled bool - ErrorArgs []interface{} - FatalCalled bool - FatalArgs []interface{} - ParallelCalled bool - SkipCalled bool - SkipArgs []interface{} - - f bool -} - -func (t *mockT) Error(args ...interface{}) { - t.ErrorCalled = true - t.ErrorArgs = args - t.f = true -} - -func (t *mockT) Fatal(args ...interface{}) { - t.FatalCalled = true - t.FatalArgs = args - t.f = true -} - -func (t *mockT) Parallel() { - t.ParallelCalled = true -} - -func (t *mockT) Skip(args ...interface{}) { - t.SkipCalled = true - t.SkipArgs = args - t.f = true -} - -func (t *mockT) Name() string { - return "MockedName" -} - -func (t *mockT) failed() bool { - return t.f -} - -func (t *mockT) failMessage() string { - if t.FatalCalled { - return t.FatalArgs[0].(string) - } else if t.ErrorCalled { - return t.ErrorArgs[0].(string) - } else if t.SkipCalled { - return t.SkipArgs[0].(string) - } - - return "unknown" -} - -func testProvider() *terraform.MockResourceProvider { - mp := new(terraform.MockResourceProvider) - mp.DiffReturn = &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{ - New: "bar", - }, - }, - } - mp.ResourcesReturn = []terraform.ResourceType{ - 
terraform.ResourceType{Name: "test_instance"}, - } - - return mp -} - -func TestTest_Main(t *testing.T) { - flag.Parse() - if *flagSweep == "" { - // Tests for the TestMain method used for Sweepers will panic without the -sweep - // flag specified. Mock the value for now - *flagSweep = "us-east-1" - } - - cases := []struct { - Name string - Sweepers map[string]*Sweeper - ExpectedRunList []string - SweepRun string - }{ - { - Name: "normal", - Sweepers: map[string]*Sweeper{ - "aws_dummy": &Sweeper{ - Name: "aws_dummy", - F: mockSweeperFunc, - }, - }, - ExpectedRunList: []string{"aws_dummy"}, - }, - { - Name: "with dep", - Sweepers: map[string]*Sweeper{ - "aws_dummy": &Sweeper{ - Name: "aws_dummy", - F: mockSweeperFunc, - }, - "aws_top": &Sweeper{ - Name: "aws_top", - Dependencies: []string{"aws_sub"}, - F: mockSweeperFunc, - }, - "aws_sub": &Sweeper{ - Name: "aws_sub", - F: mockSweeperFunc, - }, - }, - ExpectedRunList: []string{"aws_dummy", "aws_sub", "aws_top"}, - }, - { - Name: "with filter", - Sweepers: map[string]*Sweeper{ - "aws_dummy": &Sweeper{ - Name: "aws_dummy", - F: mockSweeperFunc, - }, - "aws_top": &Sweeper{ - Name: "aws_top", - Dependencies: []string{"aws_sub"}, - F: mockSweeperFunc, - }, - "aws_sub": &Sweeper{ - Name: "aws_sub", - F: mockSweeperFunc, - }, - }, - ExpectedRunList: []string{"aws_dummy"}, - SweepRun: "aws_dummy", - }, - { - Name: "with two filters", - Sweepers: map[string]*Sweeper{ - "aws_dummy": &Sweeper{ - Name: "aws_dummy", - F: mockSweeperFunc, - }, - "aws_top": &Sweeper{ - Name: "aws_top", - Dependencies: []string{"aws_sub"}, - F: mockSweeperFunc, - }, - "aws_sub": &Sweeper{ - Name: "aws_sub", - F: mockSweeperFunc, - }, - }, - ExpectedRunList: []string{"aws_dummy", "aws_sub"}, - SweepRun: "aws_dummy,aws_sub", - }, - { - Name: "with dep and filter", - Sweepers: map[string]*Sweeper{ - "aws_dummy": &Sweeper{ - Name: "aws_dummy", - F: mockSweeperFunc, - }, - "aws_top": &Sweeper{ - Name: "aws_top", - Dependencies: []string{"aws_sub"}, - F: 
mockSweeperFunc, - }, - "aws_sub": &Sweeper{ - Name: "aws_sub", - F: mockSweeperFunc, - }, - }, - ExpectedRunList: []string{"aws_top", "aws_sub"}, - SweepRun: "aws_top", - }, - { - Name: "filter and none", - Sweepers: map[string]*Sweeper{ - "aws_dummy": &Sweeper{ - Name: "aws_dummy", - F: mockSweeperFunc, - }, - "aws_top": &Sweeper{ - Name: "aws_top", - Dependencies: []string{"aws_sub"}, - F: mockSweeperFunc, - }, - "aws_sub": &Sweeper{ - Name: "aws_sub", - F: mockSweeperFunc, - }, - }, - SweepRun: "none", - }, - } - - for _, tc := range cases { - // reset sweepers - sweeperFuncs = map[string]*Sweeper{} - - t.Run(tc.Name, func(t *testing.T) { - for n, s := range tc.Sweepers { - AddTestSweepers(n, s) - } - *flagSweepRun = tc.SweepRun - - TestMain(&testing.M{}) - - // get list of tests ran from sweeperRunList keys - var keys []string - for k, _ := range sweeperRunList { - keys = append(keys, k) - } - - sort.Strings(keys) - sort.Strings(tc.ExpectedRunList) - if !reflect.DeepEqual(keys, tc.ExpectedRunList) { - t.Fatalf("Expected keys mismatch, expected:\n%#v\ngot:\n%#v\n", tc.ExpectedRunList, keys) - } - }) - } -} - -func mockSweeperFunc(s string) error { - return nil -} - -func TestTest_Taint(t *testing.T) { - t.Skip("test requires new provider implementation") - - mp := testProvider() - mp.DiffFn = func( - _ *terraform.InstanceInfo, - state *terraform.InstanceState, - _ *terraform.ResourceConfig, - ) (*terraform.InstanceDiff, error) { - return &terraform.InstanceDiff{ - DestroyTainted: state.Tainted, - }, nil - } - - mp.ApplyFn = func( - info *terraform.InstanceInfo, - state *terraform.InstanceState, - diff *terraform.InstanceDiff, - ) (*terraform.InstanceState, error) { - var id string - switch { - case diff.Destroy && !diff.DestroyTainted: - return nil, nil - case diff.DestroyTainted: - id = "tainted" - default: - id = "not_tainted" - } - - return &terraform.InstanceState{ - ID: id, - }, nil - } - - mp.RefreshFn = func( - _ *terraform.InstanceInfo, - state 
*terraform.InstanceState, - ) (*terraform.InstanceState, error) { - return state, nil - } - - mt := new(mockT) - Test(mt, TestCase{ - Providers: map[string]terraform.ResourceProvider{ - "test": mp, - }, - Steps: []TestStep{ - TestStep{ - Config: testConfigStr, - Check: func(s *terraform.State) error { - rs := s.RootModule().Resources["test_instance.foo"] - if rs.Primary.ID != "not_tainted" { - return fmt.Errorf("expected not_tainted, got %s", rs.Primary.ID) - } - return nil - }, - }, - TestStep{ - Taint: []string{"test_instance.foo"}, - Config: testConfigStr, - Check: func(s *terraform.State) error { - rs := s.RootModule().Resources["test_instance.foo"] - if rs.Primary.ID != "tainted" { - return fmt.Errorf("expected tainted, got %s", rs.Primary.ID) - } - return nil - }, - }, - TestStep{ - Taint: []string{"test_instance.fooo"}, - Config: testConfigStr, - ExpectError: regexp.MustCompile("resource \"test_instance.fooo\" not found in state"), - }, - }, - }) - - if mt.failed() { - t.Fatalf("test failure: %s", mt.failMessage()) - } -} - -const testConfigStr = ` -resource "test_instance" "foo" {} -` - -const testConfigStrProvider = ` -provider "test" {} -` - -func TestCheckResourceAttr_empty(t *testing.T) { - s := terraform.NewState() - s.AddModuleState(&terraform.ModuleState{ - Path: []string{"root"}, - Resources: map[string]*terraform.ResourceState{ - "test_resource": &terraform.ResourceState{ - Primary: &terraform.InstanceState{ - Attributes: map[string]string{ - "empty_list.#": "0", - "empty_map.%": "0", - }, - }, - }, - }, - }) - - for _, key := range []string{ - "empty_list.#", - "empty_map.%", - "missing_list.#", - "missing_map.%", - } { - t.Run(key, func(t *testing.T) { - check := TestCheckResourceAttr("test_resource", key, "0") - if err := check(s); err != nil { - t.Fatal(err) - } - }) - } -} - -func TestCheckNoResourceAttr_empty(t *testing.T) { - s := terraform.NewState() - s.AddModuleState(&terraform.ModuleState{ - Path: []string{"root"}, - Resources: 
map[string]*terraform.ResourceState{ - "test_resource": &terraform.ResourceState{ - Primary: &terraform.InstanceState{ - Attributes: map[string]string{ - "empty_list.#": "0", - "empty_map.%": "0", - }, - }, - }, - }, - }) - - for _, key := range []string{ - "empty_list.#", - "empty_map.%", - "missing_list.#", - "missing_map.%", - } { - t.Run(key, func(t *testing.T) { - check := TestCheckNoResourceAttr("test_resource", key) - if err := check(s); err != nil { - t.Fatal(err) - } - }) - } -} - -func TestTestCheckResourceAttrPair(t *testing.T) { - tests := map[string]struct { - state *terraform.State - wantErr string - }{ - "exist match": { - &terraform.State{ - Modules: []*terraform.ModuleState{ - { - Path: []string{"root"}, - Resources: map[string]*terraform.ResourceState{ - "test.a": { - Primary: &terraform.InstanceState{ - Attributes: map[string]string{ - "a": "boop", - }, - }, - }, - "test.b": { - Primary: &terraform.InstanceState{ - Attributes: map[string]string{ - "b": "boop", - }, - }, - }, - }, - }, - }, - }, - ``, - }, - "nonexist match": { - &terraform.State{ - Modules: []*terraform.ModuleState{ - { - Path: []string{"root"}, - Resources: map[string]*terraform.ResourceState{ - "test.a": { - Primary: &terraform.InstanceState{ - Attributes: map[string]string{}, - }, - }, - "test.b": { - Primary: &terraform.InstanceState{ - Attributes: map[string]string{}, - }, - }, - }, - }, - }, - }, - ``, - }, - "exist nonmatch": { - &terraform.State{ - Modules: []*terraform.ModuleState{ - { - Path: []string{"root"}, - Resources: map[string]*terraform.ResourceState{ - "test.a": { - Primary: &terraform.InstanceState{ - Attributes: map[string]string{ - "a": "beep", - }, - }, - }, - "test.b": { - Primary: &terraform.InstanceState{ - Attributes: map[string]string{ - "b": "boop", - }, - }, - }, - }, - }, - }, - }, - `test.a: Attribute 'a' expected "boop", got "beep"`, - }, - "inconsistent exist a": { - &terraform.State{ - Modules: []*terraform.ModuleState{ - { - Path: 
[]string{"root"}, - Resources: map[string]*terraform.ResourceState{ - "test.a": { - Primary: &terraform.InstanceState{ - Attributes: map[string]string{ - "a": "beep", - }, - }, - }, - "test.b": { - Primary: &terraform.InstanceState{ - Attributes: map[string]string{}, - }, - }, - }, - }, - }, - }, - `test.a: Attribute "a" is "beep", but "b" is not set in test.b`, - }, - "inconsistent exist b": { - &terraform.State{ - Modules: []*terraform.ModuleState{ - { - Path: []string{"root"}, - Resources: map[string]*terraform.ResourceState{ - "test.a": { - Primary: &terraform.InstanceState{ - Attributes: map[string]string{}, - }, - }, - "test.b": { - Primary: &terraform.InstanceState{ - Attributes: map[string]string{ - "b": "boop", - }, - }, - }, - }, - }, - }, - }, - `test.a: Attribute "a" not set, but "b" is set in test.b as "boop"`, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - fn := TestCheckResourceAttrPair("test.a", "a", "test.b", "b") - err := fn(test.state) - - if test.wantErr != "" { - if err == nil { - t.Fatalf("succeeded; want error\nwant: %s", test.wantErr) - } - if got, want := err.Error(), test.wantErr; got != want { - t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) - } - return - } - - if err != nil { - t.Fatalf("failed; want success\ngot: %s", err.Error()) - } - }) - } -} - -func TestTestCheckResourceAttrPairCount(t *testing.T) { - tests := map[string]struct { - state *terraform.State - attr string - wantErr string - }{ - "unset and 0 equal list": { - &terraform.State{ - Modules: []*terraform.ModuleState{ - { - Path: []string{"root"}, - Resources: map[string]*terraform.ResourceState{ - "test.a": { - Primary: &terraform.InstanceState{ - Attributes: map[string]string{ - "a.#": "0", - }, - }, - }, - "test.b": { - Primary: &terraform.InstanceState{ - Attributes: map[string]string{}, - }, - }, - }, - }, - }, - }, - "a.#", - ``, - }, - "unset and 0 equal map": { - &terraform.State{ - Modules: []*terraform.ModuleState{ - { - 
Path: []string{"root"}, - Resources: map[string]*terraform.ResourceState{ - "test.a": { - Primary: &terraform.InstanceState{ - Attributes: map[string]string{ - "a.%": "0", - }, - }, - }, - "test.b": { - Primary: &terraform.InstanceState{ - Attributes: map[string]string{}, - }, - }, - }, - }, - }, - }, - "a.%", - ``, - }, - "count equal": { - &terraform.State{ - Modules: []*terraform.ModuleState{ - { - Path: []string{"root"}, - Resources: map[string]*terraform.ResourceState{ - "test.a": { - Primary: &terraform.InstanceState{ - Attributes: map[string]string{ - "a.%": "1", - }, - }, - }, - "test.b": { - Primary: &terraform.InstanceState{ - Attributes: map[string]string{ - "a.%": "1", - }}, - }, - }, - }, - }, - }, - "a.%", - ``, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - fn := TestCheckResourceAttrPair("test.a", test.attr, "test.b", test.attr) - err := fn(test.state) - - if test.wantErr != "" { - if err == nil { - t.Fatalf("succeeded; want error\nwant: %s", test.wantErr) - } - if got, want := err.Error(), test.wantErr; got != want { - t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) - } - return - } - - if err != nil { - t.Fatalf("failed; want success\ngot: %s", err.Error()) - } - }) - } -} diff --git a/helper/resource/wait.go b/helper/resource/wait.go deleted file mode 100644 index e56a5155d..000000000 --- a/helper/resource/wait.go +++ /dev/null @@ -1,84 +0,0 @@ -package resource - -import ( - "sync" - "time" -) - -// Retry is a basic wrapper around StateChangeConf that will just retry -// a function until it no longer returns an error. -func Retry(timeout time.Duration, f RetryFunc) error { - // These are used to pull the error out of the function; need a mutex to - // avoid a data race. 
- var resultErr error - var resultErrMu sync.Mutex - - c := &StateChangeConf{ - Pending: []string{"retryableerror"}, - Target: []string{"success"}, - Timeout: timeout, - MinTimeout: 500 * time.Millisecond, - Refresh: func() (interface{}, string, error) { - rerr := f() - - resultErrMu.Lock() - defer resultErrMu.Unlock() - - if rerr == nil { - resultErr = nil - return 42, "success", nil - } - - resultErr = rerr.Err - - if rerr.Retryable { - return 42, "retryableerror", nil - } - return nil, "quit", rerr.Err - }, - } - - _, waitErr := c.WaitForState() - - // Need to acquire the lock here to be able to avoid race using resultErr as - // the return value - resultErrMu.Lock() - defer resultErrMu.Unlock() - - // resultErr may be nil because the wait timed out and resultErr was never - // set; this is still an error - if resultErr == nil { - return waitErr - } - // resultErr takes precedence over waitErr if both are set because it is - // more likely to be useful - return resultErr -} - -// RetryFunc is the function retried until it succeeds. -type RetryFunc func() *RetryError - -// RetryError is the required return type of RetryFunc. It forces client code -// to choose whether or not a given error is retryable. -type RetryError struct { - Err error - Retryable bool -} - -// RetryableError is a helper to create a RetryError that's retryable from a -// given error. -func RetryableError(err error) *RetryError { - if err == nil { - return nil - } - return &RetryError{Err: err, Retryable: true} -} - -// NonRetryableError is a helper to create a RetryError that's _not_ retryable -// from a given error. 
-func NonRetryableError(err error) *RetryError { - if err == nil { - return nil - } - return &RetryError{Err: err, Retryable: false} -} diff --git a/helper/resource/wait_test.go b/helper/resource/wait_test.go deleted file mode 100644 index 526b21ae3..000000000 --- a/helper/resource/wait_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package resource - -import ( - "fmt" - "testing" - "time" -) - -func TestRetry(t *testing.T) { - t.Parallel() - - tries := 0 - f := func() *RetryError { - tries++ - if tries == 3 { - return nil - } - - return RetryableError(fmt.Errorf("error")) - } - - err := Retry(10*time.Second, f) - if err != nil { - t.Fatalf("err: %s", err) - } -} - -// make sure a slow StateRefreshFunc is allowed to complete after timeout -func TestRetry_grace(t *testing.T) { - t.Parallel() - - f := func() *RetryError { - time.Sleep(1 * time.Second) - return nil - } - - err := Retry(10*time.Millisecond, f) - if err != nil { - t.Fatalf("err: %s", err) - } -} - -func TestRetry_timeout(t *testing.T) { - t.Parallel() - - f := func() *RetryError { - return RetryableError(fmt.Errorf("always")) - } - - err := Retry(1*time.Second, f) - if err == nil { - t.Fatal("should error") - } -} - -func TestRetry_hang(t *testing.T) { - old := refreshGracePeriod - refreshGracePeriod = 50 * time.Millisecond - defer func() { - refreshGracePeriod = old - }() - - f := func() *RetryError { - time.Sleep(2 * time.Second) - return nil - } - - err := Retry(50*time.Millisecond, f) - if err == nil { - t.Fatal("should error") - } -} - -func TestRetry_error(t *testing.T) { - t.Parallel() - - expected := fmt.Errorf("nope") - f := func() *RetryError { - return NonRetryableError(expected) - } - - errCh := make(chan error) - go func() { - errCh <- Retry(1*time.Second, f) - }() - - select { - case err := <-errCh: - if err != expected { - t.Fatalf("bad: %#v", err) - } - case <-time.After(5 * time.Second): - t.Fatal("timeout") - } -} diff --git a/helper/validation/validation.go b/helper/validation/validation.go 
deleted file mode 100644 index 484f7d7da..000000000 --- a/helper/validation/validation.go +++ /dev/null @@ -1,49 +0,0 @@ -package validation - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/helper/schema" -) - -// IntBetween returns a SchemaValidateFunc which tests if the provided value -// is of type int and is between min and max (inclusive) -func IntBetween(min, max int) schema.SchemaValidateFunc { - return func(i interface{}, k string) (s []string, es []error) { - v, ok := i.(int) - if !ok { - es = append(es, fmt.Errorf("expected type of %s to be int", k)) - return - } - - if v < min || v > max { - es = append(es, fmt.Errorf("expected %s to be in the range (%d - %d), got %d", k, min, max, v)) - return - } - - return - } -} - -// StringInSlice returns a SchemaValidateFunc which tests if the provided value -// is of type string and matches the value of an element in the valid slice -// will test with in lower case if ignoreCase is true -func StringInSlice(valid []string, ignoreCase bool) schema.SchemaValidateFunc { - return func(i interface{}, k string) (s []string, es []error) { - v, ok := i.(string) - if !ok { - es = append(es, fmt.Errorf("expected type of %s to be string", k)) - return - } - - for _, str := range valid { - if v == str || (ignoreCase && strings.ToLower(v) == strings.ToLower(str)) { - return - } - } - - es = append(es, fmt.Errorf("expected %s to be one of %v, got %s", k, valid, v)) - return - } -} diff --git a/helper/validation/validation_test.go b/helper/validation/validation_test.go deleted file mode 100644 index b47fe7824..000000000 --- a/helper/validation/validation_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package validation - -import ( - "regexp" - "testing" - - "github.com/hashicorp/terraform/helper/schema" -) - -type testCase struct { - val interface{} - f schema.SchemaValidateFunc - expectedErr *regexp.Regexp -} - -func TestValidationIntBetween(t *testing.T) { - runTestCases(t, []testCase{ - { - val: 1, - f: 
IntBetween(1, 1), - }, - { - val: 1, - f: IntBetween(0, 2), - }, - { - val: 1, - f: IntBetween(2, 3), - expectedErr: regexp.MustCompile("expected [\\w]+ to be in the range \\(2 - 3\\), got 1"), - }, - { - val: "1", - f: IntBetween(2, 3), - expectedErr: regexp.MustCompile("expected type of [\\w]+ to be int"), - }, - }) -} - -func TestValidationStringInSlice(t *testing.T) { - runTestCases(t, []testCase{ - { - val: "ValidValue", - f: StringInSlice([]string{"ValidValue", "AnotherValidValue"}, false), - }, - // ignore case - { - val: "VALIDVALUE", - f: StringInSlice([]string{"ValidValue", "AnotherValidValue"}, true), - }, - { - val: "VALIDVALUE", - f: StringInSlice([]string{"ValidValue", "AnotherValidValue"}, false), - expectedErr: regexp.MustCompile("expected [\\w]+ to be one of \\[ValidValue AnotherValidValue\\], got VALIDVALUE"), - }, - { - val: "InvalidValue", - f: StringInSlice([]string{"ValidValue", "AnotherValidValue"}, false), - expectedErr: regexp.MustCompile("expected [\\w]+ to be one of \\[ValidValue AnotherValidValue\\], got InvalidValue"), - }, - { - val: 1, - f: StringInSlice([]string{"ValidValue", "AnotherValidValue"}, false), - expectedErr: regexp.MustCompile("expected type of [\\w]+ to be string"), - }, - }) -} - -func runTestCases(t *testing.T, cases []testCase) { - matchErr := func(errs []error, r *regexp.Regexp) bool { - // err must match one provided - for _, err := range errs { - if r.MatchString(err.Error()) { - return true - } - } - - return false - } - - for i, tc := range cases { - _, errs := tc.f(tc.val, "test_property") - - if len(errs) == 0 && tc.expectedErr == nil { - continue - } - - if len(errs) != 0 && tc.expectedErr == nil { - t.Fatalf("expected test case %d to produce no errors, got %v", i, errs) - } - - if !matchErr(errs, tc.expectedErr) { - t.Fatalf("expected test case %d to produce error matching \"%s\", got %v", i, tc.expectedErr, errs) - } - } -} diff --git a/instances/expander_test.go b/instances/expander_test.go index 
e143d1b98..900d716d5 100644 --- a/instances/expander_test.go +++ b/instances/expander_test.go @@ -433,17 +433,6 @@ func TestExpander(t *testing.T) { }) } -func mustResourceAddr(str string) addrs.Resource { - addr, diags := addrs.ParseAbsResourceStr(str) - if diags.HasErrors() { - panic(fmt.Sprintf("invalid resource address: %s", diags.Err())) - } - if !addr.Module.IsRoot() { - panic("invalid resource address: includes module path") - } - return addr.Resource -} - func mustAbsResourceInstanceAddr(str string) addrs.AbsResourceInstance { addr, diags := addrs.ParseAbsResourceInstanceStr(str) if diags.HasErrors() { diff --git a/internal/depsfile/locks_file.go b/internal/depsfile/locks_file.go index 1fd7f5586..2c38175a3 100644 --- a/internal/depsfile/locks_file.go +++ b/internal/depsfile/locks_file.go @@ -2,7 +2,6 @@ package depsfile import ( "fmt" - "os" "sort" "github.com/hashicorp/hcl/v2" @@ -31,14 +30,40 @@ import ( // If the returned diagnostics contains errors then the returned Locks may // be incomplete or invalid. func LoadLocksFromFile(filename string) (*Locks, tfdiags.Diagnostics) { + return loadLocks(func(parser *hclparse.Parser) (*hcl.File, hcl.Diagnostics) { + return parser.ParseHCLFile(filename) + }) +} + +// LoadLocksFromBytes reads locks from the given byte array, pretending that +// it was read from the given filename. +// +// The constraints and behaviors are otherwise the same as for +// LoadLocksFromFile. LoadLocksFromBytes is primarily to allow more convenient +// integration testing (avoiding creating temporary files on disk); if you +// are writing non-test code, consider whether LoadLocksFromFile might be +// more appropriate to call. 
+func LoadLocksFromBytes(src []byte, filename string) (*Locks, tfdiags.Diagnostics) { + return loadLocks(func(parser *hclparse.Parser) (*hcl.File, hcl.Diagnostics) { + return parser.ParseHCL(src, filename) + }) +} + +func loadLocks(loadParse func(*hclparse.Parser) (*hcl.File, hcl.Diagnostics)) (*Locks, tfdiags.Diagnostics) { ret := NewLocks() var diags tfdiags.Diagnostics parser := hclparse.NewParser() - f, hclDiags := parser.ParseHCLFile(filename) + f, hclDiags := loadParse(parser) ret.sources = parser.Sources() diags = diags.Append(hclDiags) + if f == nil { + // If we encountered an error loading the file then those errors + // should already be in diags from the above, but the file might + // also be nil itself and so we can't decode from it. + return ret, diags + } moreDiags := decodeLocksFromHCL(ret, f.Body) diags = diags.Append(moreDiags) @@ -108,7 +133,7 @@ func SaveLocksToFile(locks *Locks, filename string) tfdiags.Diagnostics { newContent := f.Bytes() - err := replacefile.AtomicWriteFile(filename, newContent, os.ModePerm) + err := replacefile.AtomicWriteFile(filename, newContent, 0644) if err != nil { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, diff --git a/internal/depsfile/locks_file_test.go b/internal/depsfile/locks_file_test.go index 6019a0715..9e0d0f429 100644 --- a/internal/depsfile/locks_file_test.go +++ b/internal/depsfile/locks_file_test.go @@ -159,6 +159,45 @@ func TestLoadLocksFromFile(t *testing.T) { } } +func TestLoadLocksFromFileAbsent(t *testing.T) { + t.Run("lock file is a directory", func(t *testing.T) { + // This can never happen when Terraform is the one generating the + // lock file, but might arise if the user makes a directory with the + // lock file's name for some reason. (There is no actual reason to do + // so, so that would always be a mistake.) 
+ locks, diags := LoadLocksFromFile("testdata") + if len(locks.providers) != 0 { + t.Errorf("returned locks has providers; expected empty locks") + } + if !diags.HasErrors() { + t.Fatalf("LoadLocksFromFile succeeded; want error") + } + // This is a generic error message from HCL itself, so upgrading HCL + // in future might cause a different error message here. + want := `Failed to read file: The configuration file "testdata" could not be read.` + got := diags.Err().Error() + if got != want { + t.Errorf("wrong error message\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("lock file doesn't exist", func(t *testing.T) { + locks, diags := LoadLocksFromFile("testdata/nonexist.hcl") + if len(locks.providers) != 0 { + t.Errorf("returned locks has providers; expected empty locks") + } + if !diags.HasErrors() { + t.Fatalf("LoadLocksFromFile succeeded; want error") + } + // This is a generic error message from HCL itself, so upgrading HCL + // in future might cause a different error message here. 
+ want := `Failed to read file: The configuration file "testdata/nonexist.hcl" could not be read.` + got := diags.Err().Error() + if got != want { + t.Errorf("wrong error message\ngot: %s\nwant: %s", got, want) + } + }) +} + func TestSaveLocksToFile(t *testing.T) { locks := NewLocks() @@ -193,6 +232,14 @@ func TestSaveLocksToFile(t *testing.T) { t.Fatalf("unexpected errors\n%s", diags.Err().Error()) } + fileInfo, err := os.Stat(filename) + if err != nil { + t.Fatalf(err.Error()) + } + if mode := fileInfo.Mode(); mode&0111 != 0 { + t.Fatalf("Expected lock file to be non-executable: %o", mode) + } + gotContentBytes, err := ioutil.ReadFile(filename) if err != nil { t.Fatalf(err.Error()) diff --git a/internal/earlyconfig/config.go b/internal/earlyconfig/config.go index ff563d4d3..2e6d0a90a 100644 --- a/internal/earlyconfig/config.go +++ b/internal/earlyconfig/config.go @@ -158,8 +158,8 @@ func (c *Config) ProviderDependencies() (*moduledeps.Module, tfdiags.Diagnostics for name, reqs := range c.Module.RequiredProviders { var fqn addrs.Provider if source := reqs.Source; source != "" { - addr, diags := addrs.ParseProviderSourceString(source) - if diags.HasErrors() { + addr, parseDiags := addrs.ParseProviderSourceString(source) + if parseDiags.HasErrors() { diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ Severity: tfconfig.DiagError, Summary: "Invalid provider source", diff --git a/internal/getproviders/didyoumean.go b/internal/getproviders/didyoumean.go new file mode 100644 index 000000000..9418330c1 --- /dev/null +++ b/internal/getproviders/didyoumean.go @@ -0,0 +1,253 @@ +package getproviders + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "path" + + "github.com/hashicorp/go-retryablehttp" + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform/addrs" +) + +// MissingProviderSuggestion takes a provider address that failed installation +// due to the remote registry reporting that it didn't 
exist, and attempts +// to find another provider that the user might have meant to select. +// +// If the result is equal to the given address then that indicates that there +// is no suggested alternative to offer, either because the function +// successfully determined there is no recorded alternative or because the +// lookup failed somehow. We don't consider a failure to find a suggestion +// as an installation failure, because the caller should already be reporting +// that the provider didn't exist anyway and this is only extra context for +// that error message. +// +// The result of this is a best effort, so any UI presenting it should be +// careful to give it only as a possibility and not necessarily a suitable +// replacement for the given provider. +// +// In practice today this function only knows how to suggest alternatives for +// "default" providers, which is to say ones that are in the hashicorp +// namespace in the Terraform registry. It will always return no result for +// any other provider. That might change in future if we introduce other ways +// to discover provider suggestions. +// +// If the given context is cancelled then this function might not return a +// renaming suggestion even if one would've been available for a completed +// request. +func MissingProviderSuggestion(ctx context.Context, addr addrs.Provider, source Source) addrs.Provider { + if !addr.IsDefault() { + return addr + } + + // Our strategy here, for a default provider, is to use the default + // registry's special API for looking up "legacy" providers and try looking + // for a legacy provider whose type name matches the type of the given + // provider. This should then find a suitable answer for any provider + // that was originally auto-installable in v0.12 and earlier but moved + // into a non-default namespace as part of introducing the heirarchical + // provider namespace. 
+ // + // To achieve that, we need to find the direct registry client in + // particular from the given source, because that is the only Source + // implementation that can actually handle a legacy provider lookup. + regSource := findLegacyProviderLookupSource(addr.Hostname, source) + if regSource == nil { + // If there's no direct registry source in the installation config + // then we can't provide a renaming suggestion. + return addr + } + + defaultNS, redirectNS, err := regSource.lookupLegacyProviderNamespace(ctx, addr.Hostname, addr.Type) + if err != nil { + return addr + } + + switch { + case redirectNS != "": + return addrs.Provider{ + Hostname: addr.Hostname, + Namespace: redirectNS, + Type: addr.Type, + } + default: + return addrs.Provider{ + Hostname: addr.Hostname, + Namespace: defaultNS, + Type: addr.Type, + } + } +} + +// findLegacyProviderLookupSource tries to find a *RegistrySource that can talk +// to the given registry host in the given Source. It might be given directly, +// or it might be given indirectly via a MultiSource where the selector +// includes a wildcard for registry.terraform.io. +// +// Returns nil if the given source does not have any configured way to talk +// directly to the given host. +// +// If the given source contains multiple sources that can talk to the given +// host directly, the first one in the sequence takes preference. In practice +// it's pointless to have two direct installation sources that match the same +// hostname anyway, so this shouldn't arise in normal use. +func findLegacyProviderLookupSource(host svchost.Hostname, source Source) *RegistrySource { + switch source := source.(type) { + + case *RegistrySource: + // Easy case: the source is a registry source directly, and so we'll + // just use it. + return source + + case *MemoizeSource: + // Also easy: the source is a memoize wrapper, so defer to its + // underlying source. 
+ return findLegacyProviderLookupSource(host, source.underlying) + + case MultiSource: + // Trickier case: if it's a multisource then we need to scan over + // its selectors until we find one that is a *RegistrySource _and_ + // that is configured to accept arbitrary providers from the + // given hostname. + + // For our matching purposes we'll use an address that would not be + // valid as a real provider FQN and thus can only match a selector + // that has no filters at all or a selector that wildcards everything + // except the hostname, like "registry.terraform.io/*/*" + matchAddr := addrs.Provider{ + Hostname: host, + // Other fields are intentionally left empty, to make this invalid + // as a specific provider address. + } + + for _, selector := range source { + // If this source has suitable matching patterns to install from + // the given hostname then we'll recursively search inside it + // for *RegistrySource objects. + if selector.CanHandleProvider(matchAddr) { + ret := findLegacyProviderLookupSource(host, selector.Source) + if ret != nil { + return ret + } + } + } + + // If we get here then there were no selectors that are both configured + // to handle modules from the given hostname and that are registry + // sources, so we fail. + return nil + + default: + // This source cannot be and cannot contain a *RegistrySource, so + // we fail. + return nil + } +} + +// lookupLegacyProviderNamespace is a special method available only on +// RegistrySource which can deal with legacy provider addresses that contain +// only a type and leave the namespace implied. +// +// It asks the registry at the given hostname to provide a default namespace +// for the given provider type, which can be combined with the given hostname +// and type name to produce a fully-qualified provider address. +// +// Not all unqualified type names can be resolved to a default namespace. If +// the request fails, this method returns an error describing the failure. 
+// +// This method exists only to allow compatibility with unqualified names +// in older configurations. New configurations should be written so as not to +// depend on it, and this fallback mechanism will likely be removed altogether +// in a future Terraform version. +func (s *RegistrySource) lookupLegacyProviderNamespace(ctx context.Context, hostname svchost.Hostname, typeName string) (string, string, error) { + client, err := s.registryClient(hostname) + if err != nil { + return "", "", err + } + return client.legacyProviderDefaultNamespace(ctx, typeName) +} + +// legacyProviderDefaultNamespace returns the raw address strings produced by +// the registry when asked about the given unqualified provider type name. +// The returned namespace string is taken verbatim from the registry's response. +// +// This method exists only to allow compatibility with unqualified names +// in older configurations. New configurations should be written so as not to +// depend on it. +func (c *registryClient) legacyProviderDefaultNamespace(ctx context.Context, typeName string) (string, string, error) { + endpointPath, err := url.Parse(path.Join("-", typeName, "versions")) + if err != nil { + // Should never happen because we're constructing this from + // already-validated components. + return "", "", err + } + endpointURL := c.baseURL.ResolveReference(endpointPath) + + req, err := retryablehttp.NewRequest("GET", endpointURL.String(), nil) + if err != nil { + return "", "", err + } + req = req.WithContext(ctx) + c.addHeadersToRequest(req.Request) + + // This is just to give us something to return in error messages. It's + // not a proper provider address. + placeholderProviderAddr := addrs.NewLegacyProvider(typeName) + + resp, err := c.httpClient.Do(req) + if err != nil { + return "", "", c.errQueryFailed(placeholderProviderAddr, err) + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + // Great! 
+ case http.StatusNotFound: + return "", "", ErrProviderNotFound{ + Provider: placeholderProviderAddr, + } + case http.StatusUnauthorized, http.StatusForbidden: + return "", "", c.errUnauthorized(placeholderProviderAddr.Hostname) + default: + return "", "", c.errQueryFailed(placeholderProviderAddr, errors.New(resp.Status)) + } + + type ResponseBody struct { + Id string `json:"id"` + MovedTo string `json:"moved_to"` + } + var body ResponseBody + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&body); err != nil { + return "", "", c.errQueryFailed(placeholderProviderAddr, err) + } + + provider, diags := addrs.ParseProviderSourceString(body.Id) + if diags.HasErrors() { + return "", "", fmt.Errorf("Error parsing provider ID from Registry: %s", diags.Err()) + } + + if provider.Type != typeName { + return "", "", fmt.Errorf("Registry returned provider with type %q, expected %q", provider.Type, typeName) + } + + var movedTo addrs.Provider + if body.MovedTo != "" { + movedTo, diags = addrs.ParseProviderSourceString(body.MovedTo) + if diags.HasErrors() { + return "", "", fmt.Errorf("Error parsing provider ID from Registry: %s", diags.Err()) + } + + if movedTo.Type != typeName { + return "", "", fmt.Errorf("Registry returned provider with type %q, expected %q", movedTo.Type, typeName) + } + } + + return provider.Namespace, movedTo.Namespace, nil +} diff --git a/internal/getproviders/didyoumean_test.go b/internal/getproviders/didyoumean_test.go new file mode 100644 index 000000000..05c315018 --- /dev/null +++ b/internal/getproviders/didyoumean_test.go @@ -0,0 +1,128 @@ +package getproviders + +import ( + "context" + "testing" + + svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform/addrs" +) + +func TestMissingProviderSuggestion(t *testing.T) { + // Most of these test cases rely on specific "magic" provider addresses + // that are implemented by the fake registry source returned by + // testRegistrySource. 
Refer to that function for more details on how + // they work. + + t.Run("happy path", func(t *testing.T) { + ctx := context.Background() + source, _, close := testRegistrySource(t) + defer close() + + // testRegistrySource handles -/legacy as a valid legacy provider + // lookup mapping to legacycorp/legacy. + got := MissingProviderSuggestion( + ctx, + addrs.NewDefaultProvider("legacy"), + source, + ) + + want := addrs.Provider{ + Hostname: defaultRegistryHost, + Namespace: "legacycorp", + Type: "legacy", + } + if got != want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("provider moved", func(t *testing.T) { + ctx := context.Background() + source, _, close := testRegistrySource(t) + defer close() + + // testRegistrySource handles -/moved as a valid legacy provider + // lookup mapping to hashicorp/moved but with an additional "redirect" + // to acme/moved. This mimics how for some providers there is both + // a copy under terraform-providers for v0.12 compatibility _and_ a + // copy in some other namespace for v0.13 or later to use. Our naming + // suggestions ignore the v0.12-compatible one and suggest the + // other one. + got := MissingProviderSuggestion( + ctx, + addrs.NewDefaultProvider("moved"), + source, + ) + + want := addrs.Provider{ + Hostname: defaultRegistryHost, + Namespace: "acme", + Type: "moved", + } + if got != want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("invalid response", func(t *testing.T) { + ctx := context.Background() + source, _, close := testRegistrySource(t) + defer close() + + // testRegistrySource handles -/invalid by returning an invalid + // provider address, which MissingProviderSuggestion should reject + // and behave as if there was no suggestion available. 
+ want := addrs.NewDefaultProvider("invalid") + got := MissingProviderSuggestion( + ctx, + want, + source, + ) + if got != want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("another registry", func(t *testing.T) { + ctx := context.Background() + source, _, close := testRegistrySource(t) + defer close() + + // Because this provider address isn't on registry.terraform.io, + // MissingProviderSuggestion won't even attempt to make a suggestion + // for it. + want := addrs.Provider{ + Hostname: svchost.Hostname("example.com"), + Namespace: "whatever", + Type: "foo", + } + got := MissingProviderSuggestion( + ctx, + want, + source, + ) + if got != want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) + t.Run("another namespace", func(t *testing.T) { + ctx := context.Background() + source, _, close := testRegistrySource(t) + defer close() + + // Because this provider address isn't in + // registry.terraform.io/hashicorp/..., MissingProviderSuggestion won't + // even attempt to make a suggestion for it. + want := addrs.Provider{ + Hostname: defaultRegistryHost, + Namespace: "whatever", + Type: "foo", + } + got := MissingProviderSuggestion( + ctx, + want, + source, + ) + if got != want { + t.Errorf("wrong result\ngot: %s\nwant: %s", got, want) + } + }) +} diff --git a/internal/getproviders/filesystem_search.go b/internal/getproviders/filesystem_search.go index 9c0e7e8f1..ef7e73997 100644 --- a/internal/getproviders/filesystem_search.go +++ b/internal/getproviders/filesystem_search.go @@ -120,7 +120,8 @@ func SearchLocalDirectory(baseDir string) (map[addrs.Provider]PackageMetaList, e // filesystem object below. 
info, err = os.Stat(fullPath) if err != nil { - return fmt.Errorf("failed to read metadata about %s: %s", fullPath, err) + log.Printf("[WARN] failed to read metadata about %s: %s", fullPath, err) + return nil } switch len(parts) { diff --git a/internal/getproviders/registry_client.go b/internal/getproviders/registry_client.go index befa74274..7608fb806 100644 --- a/internal/getproviders/registry_client.go +++ b/internal/getproviders/registry_client.go @@ -261,7 +261,7 @@ func (c *registryClient) PackageMeta(ctx context.Context, provider addrs.Provide match = true } } - if match == false { + if !match { // If the protocol version is not supported, try to find the closest // matching version. closest, err := c.findClosestProtocolCompatibleVersion(ctx, provider, version) diff --git a/internal/getproviders/types.go b/internal/getproviders/types.go index 8a5b382e4..c71fc5893 100644 --- a/internal/getproviders/types.go +++ b/internal/getproviders/types.go @@ -407,7 +407,18 @@ func VersionConstraintsString(spec VersionConstraints) string { // and sort them into a consistent order. sels := make(map[constraints.SelectionSpec]struct{}) for _, sel := range spec { - sels[sel] = struct{}{} + // The parser allows writing abbreviated version (such as 2) which + // end up being represented in memory with trailing unconstrained parts + // (for example 2.*.*). For the purpose of serialization with Ruby + // style syntax, these unconstrained parts can all be represented as 0 + // with no loss of meaning, so we make that conversion here. Doing so + // allows us to deduplicate equivalent constraints, such as >= 2.0 and + // >= 2.0.0. + normalizedSel := constraints.SelectionSpec{ + Operator: sel.Operator, + Boundary: sel.Boundary.ConstrainToZero(), + } + sels[normalizedSel] = struct{}{} } selsOrder := make([]constraints.SelectionSpec, 0, len(sels)) for sel := range sels { @@ -450,34 +461,26 @@ func VersionConstraintsString(spec VersionConstraints) string { b.WriteString("??? 
") } - // The parser allows writing abbreviated version (such as 2) which - // end up being represented in memory with trailing unconstrained parts - // (for example 2.*.*). For the purpose of serialization with Ruby - // style syntax, these unconstrained parts can all be represented as 0 - // with no loss of meaning, so we make that conversion here. - // - // This is possible because we use a different constraint operator to - // distinguish between the two types of pessimistic constraint: - // minor-only and patch-only. For minor-only constraints, we always - // want to display only the major and minor version components, so we - // special-case that operator below. + // We use a different constraint operator to distinguish between the + // two types of pessimistic constraint: minor-only and patch-only. For + // minor-only constraints, we always want to display only the major and + // minor version components, so we special-case that operator below. // // One final edge case is a minor-only constraint specified with only // the major version, such as ~> 2. We treat this the same as ~> 2.0, // because a major-only pessimistic constraint does not exist: it is // logically identical to >= 2.0.0. - boundary := sel.Boundary.ConstrainToZero() if sel.Operator == constraints.OpGreaterThanOrEqualMinorOnly { // The minor-pessimistic syntax uses only two version components. 
- fmt.Fprintf(&b, "%s.%s", boundary.Major, boundary.Minor) + fmt.Fprintf(&b, "%s.%s", sel.Boundary.Major, sel.Boundary.Minor) } else { - fmt.Fprintf(&b, "%s.%s.%s", boundary.Major, boundary.Minor, boundary.Patch) + fmt.Fprintf(&b, "%s.%s.%s", sel.Boundary.Major, sel.Boundary.Minor, sel.Boundary.Patch) } if sel.Boundary.Prerelease != "" { - b.WriteString("-" + boundary.Prerelease) + b.WriteString("-" + sel.Boundary.Prerelease) } if sel.Boundary.Metadata != "" { - b.WriteString("+" + boundary.Metadata) + b.WriteString("+" + sel.Boundary.Metadata) } } return b.String() diff --git a/internal/getproviders/types_test.go b/internal/getproviders/types_test.go index fb8c7669c..b12cc2155 100644 --- a/internal/getproviders/types_test.go +++ b/internal/getproviders/types_test.go @@ -53,6 +53,10 @@ func TestVersionConstraintsString(t *testing.T) { MustParseVersionConstraints(">= 1.2.3, 1.2.3, ~> 1.2, 1.2.3"), "~> 1.2, >= 1.2.3, 1.2.3", }, + "equivalent duplicates removed": { + MustParseVersionConstraints(">= 2.68, >= 2.68.0"), + ">= 2.68.0", + }, "consistent ordering, exhaustive": { // This weird jumble is just to exercise the different sort // ordering codepaths. Hopefully nothing quite this horrific diff --git a/internal/grpcwrap/provider.go b/internal/grpcwrap/provider.go new file mode 100644 index 000000000..6d7da068d --- /dev/null +++ b/internal/grpcwrap/provider.go @@ -0,0 +1,415 @@ +package grpcwrap + +import ( + "context" + + "github.com/hashicorp/terraform/internal/tfplugin5" + "github.com/hashicorp/terraform/plugin/convert" + "github.com/hashicorp/terraform/providers" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + "github.com/zclconf/go-cty/cty/msgpack" +) + +// New wraps a providers.Interface to implement a grpc ProviderServer. +// This is useful for creating a test binary out of an internal provider +// implementation. 
+func Provider(p providers.Interface) tfplugin5.ProviderServer { + return &provider{ + provider: p, + schema: p.GetSchema(), + } +} + +type provider struct { + provider providers.Interface + schema providers.GetSchemaResponse +} + +func (p *provider) GetSchema(_ context.Context, req *tfplugin5.GetProviderSchema_Request) (*tfplugin5.GetProviderSchema_Response, error) { + resp := &tfplugin5.GetProviderSchema_Response{ + ResourceSchemas: make(map[string]*tfplugin5.Schema), + DataSourceSchemas: make(map[string]*tfplugin5.Schema), + } + + resp.Provider = &tfplugin5.Schema{ + Block: &tfplugin5.Schema_Block{}, + } + if p.schema.Provider.Block != nil { + resp.Provider.Block = convert.ConfigSchemaToProto(p.schema.Provider.Block) + } + + resp.ProviderMeta = &tfplugin5.Schema{ + Block: &tfplugin5.Schema_Block{}, + } + if p.schema.ProviderMeta.Block != nil { + resp.ProviderMeta.Block = convert.ConfigSchemaToProto(p.schema.ProviderMeta.Block) + } + + for typ, res := range p.schema.ResourceTypes { + resp.ResourceSchemas[typ] = &tfplugin5.Schema{ + Version: res.Version, + Block: convert.ConfigSchemaToProto(res.Block), + } + } + for typ, dat := range p.schema.DataSources { + resp.DataSourceSchemas[typ] = &tfplugin5.Schema{ + Version: dat.Version, + Block: convert.ConfigSchemaToProto(dat.Block), + } + } + + // include any diagnostics from the original GetSchema call + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, p.schema.Diagnostics) + + return resp, nil +} + +func (p *provider) PrepareProviderConfig(_ context.Context, req *tfplugin5.PrepareProviderConfig_Request) (*tfplugin5.PrepareProviderConfig_Response, error) { + resp := &tfplugin5.PrepareProviderConfig_Response{} + ty := p.schema.Provider.Block.ImpliedType() + + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + prepareResp := 
p.provider.PrepareProviderConfig(providers.PrepareProviderConfigRequest{ + Config: configVal, + }) + + // the PreparedConfig value is no longer used + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, prepareResp.Diagnostics) + return resp, nil +} + +func (p *provider) ValidateResourceTypeConfig(_ context.Context, req *tfplugin5.ValidateResourceTypeConfig_Request) (*tfplugin5.ValidateResourceTypeConfig_Response, error) { + resp := &tfplugin5.ValidateResourceTypeConfig_Response{} + ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() + + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + validateResp := p.provider.ValidateResourceTypeConfig(providers.ValidateResourceTypeConfigRequest{ + TypeName: req.TypeName, + Config: configVal, + }) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, validateResp.Diagnostics) + return resp, nil +} + +func (p *provider) ValidateDataSourceConfig(_ context.Context, req *tfplugin5.ValidateDataSourceConfig_Request) (*tfplugin5.ValidateDataSourceConfig_Response, error) { + resp := &tfplugin5.ValidateDataSourceConfig_Response{} + ty := p.schema.DataSources[req.TypeName].Block.ImpliedType() + + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + validateResp := p.provider.ValidateDataSourceConfig(providers.ValidateDataSourceConfigRequest{ + TypeName: req.TypeName, + Config: configVal, + }) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, validateResp.Diagnostics) + return resp, nil +} + +func (p *provider) UpgradeResourceState(_ context.Context, req *tfplugin5.UpgradeResourceState_Request) (*tfplugin5.UpgradeResourceState_Response, error) { + resp := &tfplugin5.UpgradeResourceState_Response{} + ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() + + 
upgradeResp := p.provider.UpgradeResourceState(providers.UpgradeResourceStateRequest{ + TypeName: req.TypeName, + Version: req.Version, + RawStateJSON: req.RawState.Json, + }) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, upgradeResp.Diagnostics) + if upgradeResp.Diagnostics.HasErrors() { + return resp, nil + } + + dv, err := encodeDynamicValue(upgradeResp.UpgradedState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.UpgradedState = dv + + return resp, nil +} + +func (p *provider) Configure(_ context.Context, req *tfplugin5.Configure_Request) (*tfplugin5.Configure_Response, error) { + resp := &tfplugin5.Configure_Response{} + ty := p.schema.Provider.Block.ImpliedType() + + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + configureResp := p.provider.Configure(providers.ConfigureRequest{ + TerraformVersion: req.TerraformVersion, + Config: configVal, + }) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, configureResp.Diagnostics) + return resp, nil +} + +func (p *provider) ReadResource(_ context.Context, req *tfplugin5.ReadResource_Request) (*tfplugin5.ReadResource_Response, error) { + resp := &tfplugin5.ReadResource_Response{} + ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() + + stateVal, err := decodeDynamicValue(req.CurrentState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + metaTy := p.schema.ProviderMeta.Block.ImpliedType() + metaVal, err := decodeDynamicValue(req.ProviderMeta, metaTy) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + readResp := p.provider.ReadResource(providers.ReadResourceRequest{ + TypeName: req.TypeName, + PriorState: stateVal, + Private: req.Private, + ProviderMeta: 
metaVal, + }) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, readResp.Diagnostics) + if readResp.Diagnostics.HasErrors() { + return resp, nil + } + resp.Private = readResp.Private + + dv, err := encodeDynamicValue(readResp.NewState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.NewState = dv + + return resp, nil +} + +func (p *provider) PlanResourceChange(_ context.Context, req *tfplugin5.PlanResourceChange_Request) (*tfplugin5.PlanResourceChange_Response, error) { + resp := &tfplugin5.PlanResourceChange_Response{} + ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() + + priorStateVal, err := decodeDynamicValue(req.PriorState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + proposedStateVal, err := decodeDynamicValue(req.ProposedNewState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + metaTy := p.schema.ProviderMeta.Block.ImpliedType() + metaVal, err := decodeDynamicValue(req.ProviderMeta, metaTy) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + planResp := p.provider.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: req.TypeName, + PriorState: priorStateVal, + ProposedNewState: proposedStateVal, + Config: configVal, + PriorPrivate: req.PriorPrivate, + ProviderMeta: metaVal, + }) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, planResp.Diagnostics) + if planResp.Diagnostics.HasErrors() { + return resp, nil + } + + resp.PlannedPrivate = planResp.PlannedPrivate + + resp.PlannedState, err = encodeDynamicValue(planResp.PlannedState, ty) + if err != nil { + resp.Diagnostics = 
convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + for _, path := range planResp.RequiresReplace { + resp.RequiresReplace = append(resp.RequiresReplace, convert.PathToAttributePath(path)) + } + + return resp, nil +} + +func (p *provider) ApplyResourceChange(_ context.Context, req *tfplugin5.ApplyResourceChange_Request) (*tfplugin5.ApplyResourceChange_Response, error) { + resp := &tfplugin5.ApplyResourceChange_Response{} + ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() + + priorStateVal, err := decodeDynamicValue(req.PriorState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal, err := decodeDynamicValue(req.PlannedState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + metaTy := p.schema.ProviderMeta.Block.ImpliedType() + metaVal, err := decodeDynamicValue(req.ProviderMeta, metaTy) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + applyResp := p.provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{ + TypeName: req.TypeName, + PriorState: priorStateVal, + PlannedState: plannedStateVal, + Config: configVal, + PlannedPrivate: req.PlannedPrivate, + ProviderMeta: metaVal, + }) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, applyResp.Diagnostics) + if applyResp.Diagnostics.HasErrors() { + return resp, nil + } + resp.Private = applyResp.Private + + resp.NewState, err = encodeDynamicValue(applyResp.NewState, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + return resp, nil +} + +func (p *provider) ImportResourceState(_ context.Context, req 
*tfplugin5.ImportResourceState_Request) (*tfplugin5.ImportResourceState_Response, error) { + resp := &tfplugin5.ImportResourceState_Response{} + + importResp := p.provider.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: req.TypeName, + ID: req.Id, + }) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, importResp.Diagnostics) + + for _, res := range importResp.ImportedResources { + ty := p.schema.ResourceTypes[res.TypeName].Block.ImpliedType() + state, err := encodeDynamicValue(res.State, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + continue + } + + resp.ImportedResources = append(resp.ImportedResources, &tfplugin5.ImportResourceState_ImportedResource{ + TypeName: res.TypeName, + State: state, + Private: res.Private, + }) + } + + return resp, nil +} + +func (p *provider) ReadDataSource(_ context.Context, req *tfplugin5.ReadDataSource_Request) (*tfplugin5.ReadDataSource_Response, error) { + resp := &tfplugin5.ReadDataSource_Response{} + ty := p.schema.DataSources[req.TypeName].Block.ImpliedType() + + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + metaTy := p.schema.ProviderMeta.Block.ImpliedType() + metaVal, err := decodeDynamicValue(req.ProviderMeta, metaTy) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + readResp := p.provider.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: req.TypeName, + Config: configVal, + ProviderMeta: metaVal, + }) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, readResp.Diagnostics) + if readResp.Diagnostics.HasErrors() { + return resp, nil + } + + resp.State, err = encodeDynamicValue(readResp.State, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + return resp, nil +} + +func (p 
*provider) Stop(context.Context, *tfplugin5.Stop_Request) (*tfplugin5.Stop_Response, error) { + resp := &tfplugin5.Stop_Response{} + err := p.provider.Stop() + if err != nil { + resp.Error = err.Error() + } + return resp, nil +} + +// decode a DynamicValue from either the JSON or MsgPack encoding. +func decodeDynamicValue(v *tfplugin5.DynamicValue, ty cty.Type) (cty.Value, error) { + // always return a valid value + var err error + res := cty.NullVal(ty) + if v == nil { + return res, nil + } + + switch { + case len(v.Msgpack) > 0: + res, err = msgpack.Unmarshal(v.Msgpack, ty) + case len(v.Json) > 0: + res, err = ctyjson.Unmarshal(v.Json, ty) + } + return res, err +} + +// encode a cty.Value into a DynamicValue msgpack payload. +func encodeDynamicValue(v cty.Value, ty cty.Type) (*tfplugin5.DynamicValue, error) { + mp, err := msgpack.Marshal(v, ty) + return &tfplugin5.DynamicValue{ + Msgpack: mp, + }, err +} diff --git a/internal/grpcwrap/provisioner.go b/internal/grpcwrap/provisioner.go new file mode 100644 index 000000000..1fffc40ac --- /dev/null +++ b/internal/grpcwrap/provisioner.go @@ -0,0 +1,116 @@ +package grpcwrap + +import ( + "context" + "log" + "strings" + "unicode/utf8" + + "github.com/hashicorp/terraform/communicator/shared" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/internal/tfplugin5" + "github.com/hashicorp/terraform/plugin/convert" + "github.com/hashicorp/terraform/provisioners" +) + +// New wraps a providers.Interface to implement a grpc ProviderServer. +// This is useful for creating a test binary out of an internal provider +// implementation. 
+func Provisioner(p provisioners.Interface) tfplugin5.ProvisionerServer { + return &provisioner{ + provisioner: p, + schema: p.GetSchema().Provisioner, + } +} + +type provisioner struct { + provisioner provisioners.Interface + schema *configschema.Block +} + +func (p *provisioner) GetSchema(_ context.Context, req *tfplugin5.GetProvisionerSchema_Request) (*tfplugin5.GetProvisionerSchema_Response, error) { + resp := &tfplugin5.GetProvisionerSchema_Response{} + + resp.Provisioner = &tfplugin5.Schema{ + Block: &tfplugin5.Schema_Block{}, + } + + if p.schema != nil { + resp.Provisioner.Block = convert.ConfigSchemaToProto(p.schema) + } + + return resp, nil +} + +func (p *provisioner) ValidateProvisionerConfig(_ context.Context, req *tfplugin5.ValidateProvisionerConfig_Request) (*tfplugin5.ValidateProvisionerConfig_Response, error) { + resp := &tfplugin5.ValidateProvisionerConfig_Response{} + ty := p.schema.ImpliedType() + + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + validateResp := p.provisioner.ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{ + Config: configVal, + }) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, validateResp.Diagnostics) + return resp, nil +} + +func (p *provisioner) ProvisionResource(req *tfplugin5.ProvisionResource_Request, srv tfplugin5.Provisioner_ProvisionResourceServer) error { + // We send back a diagnostics over the stream if there was a + // provisioner-side problem. 
+ srvResp := &tfplugin5.ProvisionResource_Response{} + + ty := p.schema.ImpliedType() + configVal, err := decodeDynamicValue(req.Config, ty) + if err != nil { + srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) + srv.Send(srvResp) + return nil + } + + connVal, err := decodeDynamicValue(req.Connection, shared.ConnectionBlockSupersetSchema.ImpliedType()) + if err != nil { + srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) + srv.Send(srvResp) + return nil + } + + resp := p.provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: configVal, + Connection: connVal, + UIOutput: uiOutput{srv}, + }) + + srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, resp.Diagnostics) + srv.Send(srvResp) + return nil +} + +func (p *provisioner) Stop(context.Context, *tfplugin5.Stop_Request) (*tfplugin5.Stop_Response, error) { + resp := &tfplugin5.Stop_Response{} + err := p.provisioner.Stop() + if err != nil { + resp.Error = err.Error() + } + return resp, nil +} + +// uiOutput implements the terraform.UIOutput interface to adapt the grpc +// stream to the legacy Provisioner.Apply method. 
+type uiOutput struct { + srv tfplugin5.Provisioner_ProvisionResourceServer +} + +func (o uiOutput) Output(s string) { + err := o.srv.Send(&tfplugin5.ProvisionResource_Response{ + Output: strings.ToValidUTF8(s, string(utf8.RuneError)), + }) + if err != nil { + log.Printf("[ERROR] %s", err) + } +} diff --git a/helper/slowmessage/slowmessage.go b/internal/helper/slowmessage/slowmessage.go similarity index 100% rename from helper/slowmessage/slowmessage.go rename to internal/helper/slowmessage/slowmessage.go diff --git a/helper/slowmessage/slowmessage_test.go b/internal/helper/slowmessage/slowmessage_test.go similarity index 100% rename from helper/slowmessage/slowmessage_test.go rename to internal/helper/slowmessage/slowmessage_test.go diff --git a/helper/wrappedreadline/wrappedreadline.go b/internal/helper/wrappedreadline/wrappedreadline.go similarity index 96% rename from helper/wrappedreadline/wrappedreadline.go rename to internal/helper/wrappedreadline/wrappedreadline.go index 86d7b9215..6d2ffd15f 100644 --- a/helper/wrappedreadline/wrappedreadline.go +++ b/internal/helper/wrappedreadline/wrappedreadline.go @@ -14,7 +14,7 @@ import ( "github.com/chzyer/readline" - "github.com/hashicorp/terraform/helper/wrappedstreams" + "github.com/hashicorp/terraform/internal/helper/wrappedstreams" ) // Override overrides the values in readline.Config that need to be diff --git a/helper/wrappedreadline/wrappedreadline_unix.go b/internal/helper/wrappedreadline/wrappedreadline_unix.go similarity index 91% rename from helper/wrappedreadline/wrappedreadline_unix.go rename to internal/helper/wrappedreadline/wrappedreadline_unix.go index 4e410c7b7..00cf29320 100644 --- a/helper/wrappedreadline/wrappedreadline_unix.go +++ b/internal/helper/wrappedreadline/wrappedreadline_unix.go @@ -6,7 +6,7 @@ import ( "syscall" "unsafe" - "github.com/hashicorp/terraform/helper/wrappedstreams" + "github.com/hashicorp/terraform/internal/helper/wrappedstreams" ) // getWidth impl for Unix diff --git 
a/helper/wrappedreadline/wrappedreadline_windows.go b/internal/helper/wrappedreadline/wrappedreadline_windows.go similarity index 100% rename from helper/wrappedreadline/wrappedreadline_windows.go rename to internal/helper/wrappedreadline/wrappedreadline_windows.go diff --git a/helper/wrappedstreams/streams.go b/internal/helper/wrappedstreams/streams.go similarity index 93% rename from helper/wrappedstreams/streams.go rename to internal/helper/wrappedstreams/streams.go index b661ed732..1ccc43973 100644 --- a/helper/wrappedstreams/streams.go +++ b/internal/helper/wrappedstreams/streams.go @@ -35,7 +35,7 @@ func fds() (stdin, stdout, stderr *os.File) { return } -// These are the wrapped standard streams. These are setup by the +// These are the wrapped standard streams. These are set up by the // platform specific code in initPlatform. var ( wrappedStdin *os.File diff --git a/helper/wrappedstreams/streams_other.go b/internal/helper/wrappedstreams/streams_other.go similarity index 100% rename from helper/wrappedstreams/streams_other.go rename to internal/helper/wrappedstreams/streams_other.go diff --git a/helper/wrappedstreams/streams_windows.go b/internal/helper/wrappedstreams/streams_windows.go similarity index 100% rename from helper/wrappedstreams/streams_windows.go rename to internal/helper/wrappedstreams/streams_windows.go diff --git a/helper/acctest/acctest.go b/internal/legacy/helper/acctest/acctest.go similarity index 100% rename from helper/acctest/acctest.go rename to internal/legacy/helper/acctest/acctest.go diff --git a/helper/acctest/random.go b/internal/legacy/helper/acctest/random.go similarity index 100% rename from helper/acctest/random.go rename to internal/legacy/helper/acctest/random.go diff --git a/helper/acctest/random_test.go b/internal/legacy/helper/acctest/random_test.go similarity index 100% rename from helper/acctest/random_test.go rename to internal/legacy/helper/acctest/random_test.go diff --git a/helper/acctest/remotetests.go 
b/internal/legacy/helper/acctest/remotetests.go similarity index 100% rename from helper/acctest/remotetests.go rename to internal/legacy/helper/acctest/remotetests.go diff --git a/helper/hashcode/hashcode.go b/internal/legacy/helper/hashcode/hashcode.go similarity index 100% rename from helper/hashcode/hashcode.go rename to internal/legacy/helper/hashcode/hashcode.go diff --git a/helper/hashcode/hashcode_test.go b/internal/legacy/helper/hashcode/hashcode_test.go similarity index 100% rename from helper/hashcode/hashcode_test.go rename to internal/legacy/helper/hashcode/hashcode_test.go diff --git a/helper/schema/README.md b/internal/legacy/helper/schema/README.md similarity index 100% rename from helper/schema/README.md rename to internal/legacy/helper/schema/README.md diff --git a/helper/schema/backend.go b/internal/legacy/helper/schema/backend.go similarity index 98% rename from helper/schema/backend.go rename to internal/legacy/helper/schema/backend.go index 42c2bed92..a7f440e02 100644 --- a/helper/schema/backend.go +++ b/internal/legacy/helper/schema/backend.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" ctyconvert "github.com/zclconf/go-cty/cty/convert" ) diff --git a/helper/schema/backend_test.go b/internal/legacy/helper/schema/backend_test.go similarity index 100% rename from helper/schema/backend_test.go rename to internal/legacy/helper/schema/backend_test.go diff --git a/helper/schema/core_schema.go b/internal/legacy/helper/schema/core_schema.go similarity index 100% rename from helper/schema/core_schema.go rename to internal/legacy/helper/schema/core_schema.go diff --git a/helper/schema/core_schema_test.go b/internal/legacy/helper/schema/core_schema_test.go similarity index 100% rename from helper/schema/core_schema_test.go rename to 
internal/legacy/helper/schema/core_schema_test.go diff --git a/helper/schema/data_source_resource_shim.go b/internal/legacy/helper/schema/data_source_resource_shim.go similarity index 100% rename from helper/schema/data_source_resource_shim.go rename to internal/legacy/helper/schema/data_source_resource_shim.go diff --git a/helper/schema/equal.go b/internal/legacy/helper/schema/equal.go similarity index 100% rename from helper/schema/equal.go rename to internal/legacy/helper/schema/equal.go diff --git a/helper/schema/field_reader.go b/internal/legacy/helper/schema/field_reader.go similarity index 100% rename from helper/schema/field_reader.go rename to internal/legacy/helper/schema/field_reader.go diff --git a/helper/schema/field_reader_config.go b/internal/legacy/helper/schema/field_reader_config.go similarity index 99% rename from helper/schema/field_reader_config.go rename to internal/legacy/helper/schema/field_reader_config.go index 6ad3f13cb..f4a43d1fc 100644 --- a/helper/schema/field_reader_config.go +++ b/internal/legacy/helper/schema/field_reader_config.go @@ -7,7 +7,7 @@ import ( "strings" "sync" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" "github.com/mitchellh/mapstructure" ) diff --git a/helper/schema/field_reader_config_test.go b/internal/legacy/helper/schema/field_reader_config_test.go similarity index 98% rename from helper/schema/field_reader_config_test.go rename to internal/legacy/helper/schema/field_reader_config_test.go index 5e2728250..53b8e9077 100644 --- a/helper/schema/field_reader_config_test.go +++ b/internal/legacy/helper/schema/field_reader_config_test.go @@ -7,8 +7,8 @@ import ( "testing" "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/helper/hashcode" + "github.com/hashicorp/terraform/internal/legacy/terraform" ) func 
TestConfigFieldReader_impl(t *testing.T) { diff --git a/helper/schema/field_reader_diff.go b/internal/legacy/helper/schema/field_reader_diff.go similarity index 99% rename from helper/schema/field_reader_diff.go rename to internal/legacy/helper/schema/field_reader_diff.go index 3e70acf0b..84ebe272e 100644 --- a/helper/schema/field_reader_diff.go +++ b/internal/legacy/helper/schema/field_reader_diff.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" "github.com/mitchellh/mapstructure" ) diff --git a/helper/schema/field_reader_diff_test.go b/internal/legacy/helper/schema/field_reader_diff_test.go similarity index 99% rename from helper/schema/field_reader_diff_test.go rename to internal/legacy/helper/schema/field_reader_diff_test.go index 49b05e862..1f6fa7da1 100644 --- a/helper/schema/field_reader_diff_test.go +++ b/internal/legacy/helper/schema/field_reader_diff_test.go @@ -4,7 +4,7 @@ import ( "reflect" "testing" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" ) func TestDiffFieldReader_impl(t *testing.T) { diff --git a/helper/schema/field_reader_map.go b/internal/legacy/helper/schema/field_reader_map.go similarity index 100% rename from helper/schema/field_reader_map.go rename to internal/legacy/helper/schema/field_reader_map.go diff --git a/helper/schema/field_reader_map_test.go b/internal/legacy/helper/schema/field_reader_map_test.go similarity index 100% rename from helper/schema/field_reader_map_test.go rename to internal/legacy/helper/schema/field_reader_map_test.go diff --git a/helper/schema/field_reader_multi.go b/internal/legacy/helper/schema/field_reader_multi.go similarity index 100% rename from helper/schema/field_reader_multi.go rename to internal/legacy/helper/schema/field_reader_multi.go diff --git a/helper/schema/field_reader_multi_test.go 
b/internal/legacy/helper/schema/field_reader_multi_test.go similarity index 98% rename from helper/schema/field_reader_multi_test.go rename to internal/legacy/helper/schema/field_reader_multi_test.go index 85286a66e..7410335f6 100644 --- a/helper/schema/field_reader_multi_test.go +++ b/internal/legacy/helper/schema/field_reader_multi_test.go @@ -5,7 +5,7 @@ import ( "strconv" "testing" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" ) func TestMultiLevelFieldReaderReadFieldExact(t *testing.T) { diff --git a/helper/schema/field_reader_test.go b/internal/legacy/helper/schema/field_reader_test.go similarity index 100% rename from helper/schema/field_reader_test.go rename to internal/legacy/helper/schema/field_reader_test.go diff --git a/helper/schema/field_writer.go b/internal/legacy/helper/schema/field_writer.go similarity index 100% rename from helper/schema/field_writer.go rename to internal/legacy/helper/schema/field_writer.go diff --git a/helper/schema/field_writer_map.go b/internal/legacy/helper/schema/field_writer_map.go similarity index 100% rename from helper/schema/field_writer_map.go rename to internal/legacy/helper/schema/field_writer_map.go diff --git a/helper/schema/field_writer_map_test.go b/internal/legacy/helper/schema/field_writer_map_test.go similarity index 100% rename from helper/schema/field_writer_map_test.go rename to internal/legacy/helper/schema/field_writer_map_test.go diff --git a/helper/schema/getsource_string.go b/internal/legacy/helper/schema/getsource_string.go similarity index 100% rename from helper/schema/getsource_string.go rename to internal/legacy/helper/schema/getsource_string.go diff --git a/helper/schema/provider.go b/internal/legacy/helper/schema/provider.go similarity index 99% rename from helper/schema/provider.go rename to internal/legacy/helper/schema/provider.go index 59dc750ee..24736566d 100644 --- a/helper/schema/provider.go +++ 
b/internal/legacy/helper/schema/provider.go @@ -9,7 +9,7 @@ import ( multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" ) var ReservedProviderFields = []string{ diff --git a/helper/schema/provider_test.go b/internal/legacy/helper/schema/provider_test.go similarity index 99% rename from helper/schema/provider_test.go rename to internal/legacy/helper/schema/provider_test.go index 1f9b5e8bf..3f3eff4e2 100644 --- a/helper/schema/provider_test.go +++ b/internal/legacy/helper/schema/provider_test.go @@ -11,7 +11,7 @@ import ( "github.com/zclconf/go-cty/cty" "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" ) func TestProvider_impl(t *testing.T) { diff --git a/helper/schema/provisioner.go b/internal/legacy/helper/schema/provisioner.go similarity index 99% rename from helper/schema/provisioner.go rename to internal/legacy/helper/schema/provisioner.go index eee155bfb..d0ee581be 100644 --- a/helper/schema/provisioner.go +++ b/internal/legacy/helper/schema/provisioner.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" ) // Provisioner represents a resource provisioner in Terraform and properly diff --git a/helper/schema/provisioner_test.go b/internal/legacy/helper/schema/provisioner_test.go similarity index 99% rename from helper/schema/provisioner_test.go rename to internal/legacy/helper/schema/provisioner_test.go index bac6610d3..228dacd72 100644 --- a/helper/schema/provisioner_test.go +++ b/internal/legacy/helper/schema/provisioner_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/hashicorp/terraform/terraform" + 
"github.com/hashicorp/terraform/internal/legacy/terraform" ) func TestProvisioner_impl(t *testing.T) { diff --git a/helper/schema/resource.go b/internal/legacy/helper/schema/resource.go similarity index 99% rename from helper/schema/resource.go rename to internal/legacy/helper/schema/resource.go index dcfb32aea..28fa54e38 100644 --- a/helper/schema/resource.go +++ b/internal/legacy/helper/schema/resource.go @@ -6,7 +6,7 @@ import ( "log" "strconv" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" "github.com/zclconf/go-cty/cty" ) diff --git a/helper/schema/resource_data.go b/internal/legacy/helper/schema/resource_data.go similarity index 99% rename from helper/schema/resource_data.go rename to internal/legacy/helper/schema/resource_data.go index fb9387e29..3a61e3493 100644 --- a/helper/schema/resource_data.go +++ b/internal/legacy/helper/schema/resource_data.go @@ -7,7 +7,7 @@ import ( "sync" "time" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/gocty" ) diff --git a/helper/schema/resource_data_get_source.go b/internal/legacy/helper/schema/resource_data_get_source.go similarity index 100% rename from helper/schema/resource_data_get_source.go rename to internal/legacy/helper/schema/resource_data_get_source.go diff --git a/helper/schema/resource_data_test.go b/internal/legacy/helper/schema/resource_data_test.go similarity index 99% rename from helper/schema/resource_data_test.go rename to internal/legacy/helper/schema/resource_data_test.go index 5af0ff8e5..22ad45b6b 100644 --- a/helper/schema/resource_data_test.go +++ b/internal/legacy/helper/schema/resource_data_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" ) func TestResourceDataGet(t *testing.T) { diff --git 
a/helper/schema/resource_diff.go b/internal/legacy/helper/schema/resource_diff.go similarity index 99% rename from helper/schema/resource_diff.go rename to internal/legacy/helper/schema/resource_diff.go index 47b548104..72d4711eb 100644 --- a/helper/schema/resource_diff.go +++ b/internal/legacy/helper/schema/resource_diff.go @@ -7,7 +7,7 @@ import ( "strings" "sync" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" ) // newValueWriter is a minor re-implementation of MapFieldWriter to include diff --git a/helper/schema/resource_diff_test.go b/internal/legacy/helper/schema/resource_diff_test.go similarity index 99% rename from helper/schema/resource_diff_test.go rename to internal/legacy/helper/schema/resource_diff_test.go index e6897b731..7cb9d5188 100644 --- a/helper/schema/resource_diff_test.go +++ b/internal/legacy/helper/schema/resource_diff_test.go @@ -8,7 +8,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" ) // testSetFunc is a very simple function we use to test a foo/bar complex set. 
diff --git a/helper/schema/resource_importer.go b/internal/legacy/helper/schema/resource_importer.go similarity index 100% rename from helper/schema/resource_importer.go rename to internal/legacy/helper/schema/resource_importer.go diff --git a/helper/schema/resource_test.go b/internal/legacy/helper/schema/resource_test.go similarity index 99% rename from helper/schema/resource_test.go rename to internal/legacy/helper/schema/resource_test.go index a532dba39..954b1a705 100644 --- a/helper/schema/resource_test.go +++ b/internal/legacy/helper/schema/resource_test.go @@ -10,7 +10,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" "github.com/zclconf/go-cty/cty" ctyjson "github.com/zclconf/go-cty/cty/json" diff --git a/helper/schema/resource_timeout.go b/internal/legacy/helper/schema/resource_timeout.go similarity index 99% rename from helper/schema/resource_timeout.go rename to internal/legacy/helper/schema/resource_timeout.go index 5ad7aafc8..cf9654bcb 100644 --- a/helper/schema/resource_timeout.go +++ b/internal/legacy/helper/schema/resource_timeout.go @@ -6,7 +6,7 @@ import ( "time" "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" "github.com/mitchellh/copystructure" ) diff --git a/helper/schema/resource_timeout_test.go b/internal/legacy/helper/schema/resource_timeout_test.go similarity index 99% rename from helper/schema/resource_timeout_test.go rename to internal/legacy/helper/schema/resource_timeout_test.go index e53bbd849..f5091755b 100644 --- a/helper/schema/resource_timeout_test.go +++ b/internal/legacy/helper/schema/resource_timeout_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" ) func 
TestResourceTimeout_ConfigDecode_badkey(t *testing.T) { diff --git a/helper/schema/schema.go b/internal/legacy/helper/schema/schema.go similarity index 99% rename from helper/schema/schema.go rename to internal/legacy/helper/schema/schema.go index 089e6b213..488fcaaaf 100644 --- a/helper/schema/schema.go +++ b/internal/legacy/helper/schema/schema.go @@ -23,7 +23,7 @@ import ( "sync" "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" "github.com/mitchellh/copystructure" "github.com/mitchellh/mapstructure" ) @@ -1286,7 +1286,7 @@ func (m schemaMap) diffString( if os == ns && !all && !computed { // They're the same value. If there old value is not blank or we - // have an ID, then return right away since we're already setup. + // have an ID, then return right away since we're already set up. if os != "" || d.Id() != "" { return nil } diff --git a/helper/schema/schema_test.go b/internal/legacy/helper/schema/schema_test.go similarity index 99% rename from helper/schema/schema_test.go rename to internal/legacy/helper/schema/schema_test.go index 4199f3ddf..2a9da0f68 100644 --- a/helper/schema/schema_test.go +++ b/internal/legacy/helper/schema/schema_test.go @@ -12,8 +12,8 @@ import ( "testing" "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/helper/hashcode" + "github.com/hashicorp/terraform/internal/legacy/terraform" ) func TestEnvDefaultFunc(t *testing.T) { diff --git a/helper/schema/serialize.go b/internal/legacy/helper/schema/serialize.go similarity index 100% rename from helper/schema/serialize.go rename to internal/legacy/helper/schema/serialize.go diff --git a/helper/schema/serialize_test.go b/internal/legacy/helper/schema/serialize_test.go similarity index 100% rename from helper/schema/serialize_test.go rename to 
internal/legacy/helper/schema/serialize_test.go diff --git a/helper/schema/set.go b/internal/legacy/helper/schema/set.go similarity index 98% rename from helper/schema/set.go rename to internal/legacy/helper/schema/set.go index 8ee89e475..b44035c7c 100644 --- a/helper/schema/set.go +++ b/internal/legacy/helper/schema/set.go @@ -8,7 +8,7 @@ import ( "strconv" "sync" - "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/internal/legacy/helper/hashcode" ) // HashString hashes strings. If you want a Set of strings, this is the diff --git a/helper/schema/set_test.go b/internal/legacy/helper/schema/set_test.go similarity index 100% rename from helper/schema/set_test.go rename to internal/legacy/helper/schema/set_test.go diff --git a/helper/schema/shims.go b/internal/legacy/helper/schema/shims.go similarity index 98% rename from helper/schema/shims.go rename to internal/legacy/helper/schema/shims.go index d2dbff53c..b8cbf6b22 100644 --- a/helper/schema/shims.go +++ b/internal/legacy/helper/schema/shims.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" ) // DiffFromValues takes the current state and desired state as cty.Values and diff --git a/helper/schema/shims_test.go b/internal/legacy/helper/schema/shims_test.go similarity index 99% rename from helper/schema/shims_test.go rename to internal/legacy/helper/schema/shims_test.go index 050286a02..90e616e63 100644 --- a/helper/schema/shims_test.go +++ b/internal/legacy/helper/schema/shims_test.go @@ -13,9 +13,9 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/configs/hcl2shim" - "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/internal/legacy/helper/hashcode" + 
"github.com/hashicorp/terraform/internal/legacy/terraform" "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) diff --git a/helper/schema/testing.go b/internal/legacy/helper/schema/testing.go similarity index 89% rename from helper/schema/testing.go rename to internal/legacy/helper/schema/testing.go index 122782174..3b328a87c 100644 --- a/helper/schema/testing.go +++ b/internal/legacy/helper/schema/testing.go @@ -3,7 +3,7 @@ package schema import ( "testing" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/internal/legacy/terraform" ) // TestResourceDataRaw creates a ResourceData from a raw configuration map. diff --git a/helper/schema/valuetype.go b/internal/legacy/helper/schema/valuetype.go similarity index 100% rename from helper/schema/valuetype.go rename to internal/legacy/helper/schema/valuetype.go diff --git a/helper/schema/valuetype_string.go b/internal/legacy/helper/schema/valuetype_string.go similarity index 100% rename from helper/schema/valuetype_string.go rename to internal/legacy/helper/schema/valuetype_string.go diff --git a/internal/legacy/terraform/context_components.go b/internal/legacy/terraform/context_components.go new file mode 100644 index 000000000..c893a16b4 --- /dev/null +++ b/internal/legacy/terraform/context_components.go @@ -0,0 +1,65 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" +) + +// contextComponentFactory is the interface that Context uses +// to initialize various components such as providers and provisioners. +// This factory gets more information than the raw maps using to initialize +// a Context. This information is used for debugging. +type contextComponentFactory interface { + // ResourceProvider creates a new ResourceProvider with the given type. 
+ ResourceProvider(typ addrs.Provider) (providers.Interface, error) + ResourceProviders() []string + + // ResourceProvisioner creates a new ResourceProvisioner with the given + // type. + ResourceProvisioner(typ string) (provisioners.Interface, error) + ResourceProvisioners() []string +} + +// basicComponentFactory just calls a factory from a map directly. +type basicComponentFactory struct { + providers map[addrs.Provider]providers.Factory + provisioners map[string]ProvisionerFactory +} + +func (c *basicComponentFactory) ResourceProviders() []string { + var result []string + for k := range c.providers { + result = append(result, k.String()) + } + return result +} + +func (c *basicComponentFactory) ResourceProvisioners() []string { + var result []string + for k := range c.provisioners { + result = append(result, k) + } + + return result +} + +func (c *basicComponentFactory) ResourceProvider(typ addrs.Provider) (providers.Interface, error) { + f, ok := c.providers[typ] + if !ok { + return nil, fmt.Errorf("unknown provider %q", typ.String()) + } + + return f() +} + +func (c *basicComponentFactory) ResourceProvisioner(typ string) (provisioners.Interface, error) { + f, ok := c.provisioners[typ] + if !ok { + return nil, fmt.Errorf("unknown provisioner %q", typ) + } + + return f() +} diff --git a/terraform/diff.go b/internal/legacy/terraform/diff.go similarity index 100% rename from terraform/diff.go rename to internal/legacy/terraform/diff.go diff --git a/terraform/diff_test.go b/internal/legacy/terraform/diff_test.go similarity index 100% rename from terraform/diff_test.go rename to internal/legacy/terraform/diff_test.go diff --git a/internal/legacy/terraform/features.go b/internal/legacy/terraform/features.go new file mode 100644 index 000000000..97c77bdbd --- /dev/null +++ b/internal/legacy/terraform/features.go @@ -0,0 +1,7 @@ +package terraform + +import "os" + +// This file holds feature flags for the next release + +var flagWarnOutputErrors = 
os.Getenv("TF_WARN_OUTPUT_ERRORS") != "" diff --git a/terraform/instancetype.go b/internal/legacy/terraform/instancetype.go similarity index 100% rename from terraform/instancetype.go rename to internal/legacy/terraform/instancetype.go diff --git a/terraform/instancetype_string.go b/internal/legacy/terraform/instancetype_string.go similarity index 100% rename from terraform/instancetype_string.go rename to internal/legacy/terraform/instancetype_string.go diff --git a/internal/legacy/terraform/provider_mock.go b/internal/legacy/terraform/provider_mock.go new file mode 100644 index 000000000..2a6f6dbf0 --- /dev/null +++ b/internal/legacy/terraform/provider_mock.go @@ -0,0 +1,364 @@ +package terraform + +import ( + "encoding/json" + "sync" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/providers" +) + +var _ providers.Interface = (*MockProvider)(nil) + +// MockProvider implements providers.Interface but mocks out all the +// calls for testing purposes. +type MockProvider struct { + sync.Mutex + + // Anything you want, in case you need to store extra data with the mock. 
+ Meta interface{} + + GetSchemaCalled bool + GetSchemaReturn *ProviderSchema // This is using ProviderSchema directly rather than providers.GetSchemaResponse for compatibility with old tests + + PrepareProviderConfigCalled bool + PrepareProviderConfigResponse providers.PrepareProviderConfigResponse + PrepareProviderConfigRequest providers.PrepareProviderConfigRequest + PrepareProviderConfigFn func(providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse + + ValidateResourceTypeConfigCalled bool + ValidateResourceTypeConfigTypeName string + ValidateResourceTypeConfigResponse providers.ValidateResourceTypeConfigResponse + ValidateResourceTypeConfigRequest providers.ValidateResourceTypeConfigRequest + ValidateResourceTypeConfigFn func(providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse + + ValidateDataSourceConfigCalled bool + ValidateDataSourceConfigTypeName string + ValidateDataSourceConfigResponse providers.ValidateDataSourceConfigResponse + ValidateDataSourceConfigRequest providers.ValidateDataSourceConfigRequest + ValidateDataSourceConfigFn func(providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse + + UpgradeResourceStateCalled bool + UpgradeResourceStateTypeName string + UpgradeResourceStateResponse providers.UpgradeResourceStateResponse + UpgradeResourceStateRequest providers.UpgradeResourceStateRequest + UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse + + ConfigureCalled bool + ConfigureResponse providers.ConfigureResponse + ConfigureRequest providers.ConfigureRequest + ConfigureFn func(providers.ConfigureRequest) providers.ConfigureResponse + + StopCalled bool + StopFn func() error + StopResponse error + + ReadResourceCalled bool + ReadResourceResponse providers.ReadResourceResponse + ReadResourceRequest providers.ReadResourceRequest + ReadResourceFn func(providers.ReadResourceRequest) 
providers.ReadResourceResponse + + PlanResourceChangeCalled bool + PlanResourceChangeResponse providers.PlanResourceChangeResponse + PlanResourceChangeRequest providers.PlanResourceChangeRequest + PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse + + ApplyResourceChangeCalled bool + ApplyResourceChangeResponse providers.ApplyResourceChangeResponse + ApplyResourceChangeRequest providers.ApplyResourceChangeRequest + ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse + + ImportResourceStateCalled bool + ImportResourceStateResponse providers.ImportResourceStateResponse + ImportResourceStateRequest providers.ImportResourceStateRequest + ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse + // Legacy return type for existing tests, which will be shimmed into an + // ImportResourceStateResponse if set + ImportStateReturn []*InstanceState + + ReadDataSourceCalled bool + ReadDataSourceResponse providers.ReadDataSourceResponse + ReadDataSourceRequest providers.ReadDataSourceRequest + ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse + + CloseCalled bool + CloseError error +} + +func (p *MockProvider) GetSchema() providers.GetSchemaResponse { + p.Lock() + defer p.Unlock() + p.GetSchemaCalled = true + return p.getSchema() +} + +func (p *MockProvider) getSchema() providers.GetSchemaResponse { + // This version of getSchema doesn't do any locking, so it's suitable to + // call from other methods of this mock as long as they are already + // holding the lock. 
+ + ret := providers.GetSchemaResponse{ + Provider: providers.Schema{}, + DataSources: map[string]providers.Schema{}, + ResourceTypes: map[string]providers.Schema{}, + } + if p.GetSchemaReturn != nil { + ret.Provider.Block = p.GetSchemaReturn.Provider + ret.ProviderMeta.Block = p.GetSchemaReturn.ProviderMeta + for n, s := range p.GetSchemaReturn.DataSources { + ret.DataSources[n] = providers.Schema{ + Block: s, + } + } + for n, s := range p.GetSchemaReturn.ResourceTypes { + ret.ResourceTypes[n] = providers.Schema{ + Version: int64(p.GetSchemaReturn.ResourceTypeSchemaVersions[n]), + Block: s, + } + } + } + + return ret +} + +func (p *MockProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse { + p.Lock() + defer p.Unlock() + + p.PrepareProviderConfigCalled = true + p.PrepareProviderConfigRequest = r + if p.PrepareProviderConfigFn != nil { + return p.PrepareProviderConfigFn(r) + } + p.PrepareProviderConfigResponse.PreparedConfig = r.Config + return p.PrepareProviderConfigResponse +} + +func (p *MockProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateResourceTypeConfigCalled = true + p.ValidateResourceTypeConfigRequest = r + + if p.ValidateResourceTypeConfigFn != nil { + return p.ValidateResourceTypeConfigFn(r) + } + + return p.ValidateResourceTypeConfigResponse +} + +func (p *MockProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateDataSourceConfigCalled = true + p.ValidateDataSourceConfigRequest = r + + if p.ValidateDataSourceConfigFn != nil { + return p.ValidateDataSourceConfigFn(r) + } + + return p.ValidateDataSourceConfigResponse +} + +func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { + p.Lock() 
+ defer p.Unlock() + + schemas := p.getSchema() + schema := schemas.ResourceTypes[r.TypeName] + schemaType := schema.Block.ImpliedType() + + p.UpgradeResourceStateCalled = true + p.UpgradeResourceStateRequest = r + + if p.UpgradeResourceStateFn != nil { + return p.UpgradeResourceStateFn(r) + } + + resp := p.UpgradeResourceStateResponse + + if resp.UpgradedState == cty.NilVal { + switch { + case r.RawStateFlatmap != nil: + v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.UpgradedState = v + case len(r.RawStateJSON) > 0: + v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType) + + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.UpgradedState = v + } + } + return resp +} + +func (p *MockProvider) Configure(r providers.ConfigureRequest) providers.ConfigureResponse { + p.Lock() + defer p.Unlock() + + p.ConfigureCalled = true + p.ConfigureRequest = r + + if p.ConfigureFn != nil { + return p.ConfigureFn(r) + } + + return p.ConfigureResponse +} + +func (p *MockProvider) Stop() error { + // We intentionally don't lock in this one because the whole point of this + // method is to be called concurrently with another operation that can + // be cancelled. The provider itself is responsible for handling + // any concurrency concerns in this case. 
+ + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopResponse +} + +func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse { + p.Lock() + defer p.Unlock() + + p.ReadResourceCalled = true + p.ReadResourceRequest = r + + if p.ReadResourceFn != nil { + return p.ReadResourceFn(r) + } + + resp := p.ReadResourceResponse + if resp.NewState != cty.NilVal { + // make sure the NewState fits the schema + // This isn't always the case for the existing tests + newState, err := p.GetSchemaReturn.ResourceTypes[r.TypeName].CoerceValue(resp.NewState) + if err != nil { + panic(err) + } + resp.NewState = newState + return resp + } + + // just return the same state we received + resp.NewState = r.PriorState + return resp +} + +func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + p.Lock() + defer p.Unlock() + + p.PlanResourceChangeCalled = true + p.PlanResourceChangeRequest = r + + if p.PlanResourceChangeFn != nil { + return p.PlanResourceChangeFn(r) + } + + return p.PlanResourceChangeResponse +} + +func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + p.Lock() + p.ApplyResourceChangeCalled = true + p.ApplyResourceChangeRequest = r + p.Unlock() + + if p.ApplyResourceChangeFn != nil { + return p.ApplyResourceChangeFn(r) + } + + return p.ApplyResourceChangeResponse +} + +func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { + p.Lock() + defer p.Unlock() + + if p.ImportStateReturn != nil { + for _, is := range p.ImportStateReturn { + if is.Attributes == nil { + is.Attributes = make(map[string]string) + } + is.Attributes["id"] = is.ID + + typeName := is.Ephemeral.Type + // Use the requested type if the resource has no type of it's own. 
+ // We still return the empty type, which will error, but this prevents a panic. + if typeName == "" { + typeName = r.TypeName + } + + schema := p.GetSchemaReturn.ResourceTypes[typeName] + if schema == nil { + panic("no schema found for " + typeName) + } + + private, err := json.Marshal(is.Meta) + if err != nil { + panic(err) + } + + state, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schema.ImpliedType()) + if err != nil { + panic(err) + } + + state, err = schema.CoerceValue(state) + if err != nil { + panic(err) + } + + p.ImportResourceStateResponse.ImportedResources = append( + p.ImportResourceStateResponse.ImportedResources, + providers.ImportedResource{ + TypeName: is.Ephemeral.Type, + State: state, + Private: private, + }) + } + } + + p.ImportResourceStateCalled = true + p.ImportResourceStateRequest = r + if p.ImportResourceStateFn != nil { + return p.ImportResourceStateFn(r) + } + + return p.ImportResourceStateResponse +} + +func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + p.Lock() + defer p.Unlock() + + p.ReadDataSourceCalled = true + p.ReadDataSourceRequest = r + + if p.ReadDataSourceFn != nil { + return p.ReadDataSourceFn(r) + } + + return p.ReadDataSourceResponse +} + +func (p *MockProvider) Close() error { + p.CloseCalled = true + return p.CloseError +} diff --git a/internal/legacy/terraform/provisioner_mock.go b/internal/legacy/terraform/provisioner_mock.go new file mode 100644 index 000000000..2a3323541 --- /dev/null +++ b/internal/legacy/terraform/provisioner_mock.go @@ -0,0 +1,104 @@ +package terraform + +import ( + "sync" + + "github.com/hashicorp/terraform/provisioners" +) + +var _ provisioners.Interface = (*MockProvisioner)(nil) + +// MockProvisioner implements provisioners.Interface but mocks out all the +// calls for testing purposes. +type MockProvisioner struct { + sync.Mutex + // Anything you want, in case you need to store extra data with the mock. 
+ Meta interface{} + + GetSchemaCalled bool + GetSchemaResponse provisioners.GetSchemaResponse + + ValidateProvisionerConfigCalled bool + ValidateProvisionerConfigRequest provisioners.ValidateProvisionerConfigRequest + ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse + ValidateProvisionerConfigFn func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse + + ProvisionResourceCalled bool + ProvisionResourceRequest provisioners.ProvisionResourceRequest + ProvisionResourceResponse provisioners.ProvisionResourceResponse + ProvisionResourceFn func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse + + StopCalled bool + StopResponse error + StopFn func() error + + CloseCalled bool + CloseResponse error + CloseFn func() error +} + +func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse { + p.Lock() + defer p.Unlock() + + p.GetSchemaCalled = true + return p.getSchema() +} + +// getSchema is the implementation of GetSchema, which can be called from other +// methods on MockProvisioner that may already be holding the lock. 
+func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse { + return p.GetSchemaResponse +} + +func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateProvisionerConfigCalled = true + p.ValidateProvisionerConfigRequest = r + if p.ValidateProvisionerConfigFn != nil { + return p.ValidateProvisionerConfigFn(r) + } + return p.ValidateProvisionerConfigResponse +} + +func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse { + p.Lock() + defer p.Unlock() + + p.ProvisionResourceCalled = true + p.ProvisionResourceRequest = r + if p.ProvisionResourceFn != nil { + fn := p.ProvisionResourceFn + return fn(r) + } + + return p.ProvisionResourceResponse +} + +func (p *MockProvisioner) Stop() error { + // We intentionally don't lock in this one because the whole point of this + // method is to be called concurrently with another operation that can + // be cancelled. The provisioner itself is responsible for handling + // any concurrency concerns in this case. 
+ + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopResponse +} + +func (p *MockProvisioner) Close() error { + p.Lock() + defer p.Unlock() + + p.CloseCalled = true + if p.CloseFn != nil { + return p.CloseFn() + } + + return p.CloseResponse +} diff --git a/terraform/resource.go b/internal/legacy/terraform/resource.go similarity index 100% rename from terraform/resource.go rename to internal/legacy/terraform/resource.go diff --git a/terraform/resource_address.go b/internal/legacy/terraform/resource_address.go similarity index 99% rename from terraform/resource_address.go rename to internal/legacy/terraform/resource_address.go index 4acf122b3..39dc7c30c 100644 --- a/terraform/resource_address.go +++ b/internal/legacy/terraform/resource_address.go @@ -92,7 +92,7 @@ func (r *ResourceAddress) String() string { // HasResourceSpec returns true if the address has a resource spec, as // defined in the documentation: -// https://www.terraform.io/docs/internals/resource-addressing.html +// https://www.terraform.io/docs/cli/state/resource-addressing.html // In particular, this returns false if the address contains only // a module path, thus addressing the entire module. 
func (r *ResourceAddress) HasResourceSpec() bool { diff --git a/terraform/resource_address_test.go b/internal/legacy/terraform/resource_address_test.go similarity index 100% rename from terraform/resource_address_test.go rename to internal/legacy/terraform/resource_address_test.go diff --git a/terraform/resource_mode.go b/internal/legacy/terraform/resource_mode.go similarity index 100% rename from terraform/resource_mode.go rename to internal/legacy/terraform/resource_mode.go diff --git a/terraform/resource_mode_string.go b/internal/legacy/terraform/resource_mode_string.go similarity index 100% rename from terraform/resource_mode_string.go rename to internal/legacy/terraform/resource_mode_string.go diff --git a/internal/legacy/terraform/resource_provider.go b/internal/legacy/terraform/resource_provider.go new file mode 100644 index 000000000..dccfec68b --- /dev/null +++ b/internal/legacy/terraform/resource_provider.go @@ -0,0 +1,236 @@ +package terraform + +// ResourceProvider is a legacy interface for providers. +// +// This is retained only for compatibility with legacy code. The current +// interface for providers is providers.Interface, in the sibling directory +// named "providers". +type ResourceProvider interface { + /********************************************************************* + * Functions related to the provider + *********************************************************************/ + + // ProviderSchema returns the config schema for the main provider + // configuration, as would appear in a "provider" block in the + // configuration files. + // + // Currently not all providers support schema. Callers must therefore + // first call Resources and DataSources and ensure that at least one + // resource or data source has the SchemaAvailable flag set. + GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error) + + // Input was used prior to v0.12 to ask the provider to prompt the user + // for input to complete the configuration. 
+ // + // From v0.12 onwards this method is never called because Terraform Core + // is able to handle the necessary input logic itself based on the + // schema returned from GetSchema. + Input(UIInput, *ResourceConfig) (*ResourceConfig, error) + + // Validate is called once at the beginning with the raw configuration + // (no interpolation done) and can return a list of warnings and/or + // errors. + // + // This is called once with the provider configuration only. It may not + // be called at all if no provider configuration is given. + // + // This should not assume that any values of the configurations are valid. + // The primary use case of this call is to check that required keys are + // set. + Validate(*ResourceConfig) ([]string, []error) + + // Configure configures the provider itself with the configuration + // given. This is useful for setting things like access keys. + // + // This won't be called at all if no provider configuration is given. + // + // Configure returns an error if it occurred. + Configure(*ResourceConfig) error + + // Resources returns all the available resource types that this provider + // knows how to manage. + Resources() []ResourceType + + // Stop is called when the provider should halt any in-flight actions. + // + // This can be used to make a nicer Ctrl-C experience for Terraform. + // Even if this isn't implemented to do anything (just returns nil), + // Terraform will still cleanly stop after the currently executing + // graph node is complete. However, this API can be used to make more + // efficient halts. + // + // Stop doesn't have to and shouldn't block waiting for in-flight actions + // to complete. It should take any action it wants and return immediately + // acknowledging it has received the stop request. Terraform core will + // automatically not make any further API calls to the provider soon + // after Stop is called (technically exactly once the currently executing + // graph nodes are complete). 
+ // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + /********************************************************************* + * Functions related to individual resources + *********************************************************************/ + + // ValidateResource is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per resource. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + ValidateResource(string, *ResourceConfig) ([]string, []error) + + // Apply applies a diff to a specific resource and returns the new + // resource state along with an error. + // + // If the resource state given has an empty ID, then a new resource + // is expected to be created. + Apply( + *InstanceInfo, + *InstanceState, + *InstanceDiff) (*InstanceState, error) + + // Diff diffs a resource versus a desired state and returns + // a diff. + Diff( + *InstanceInfo, + *InstanceState, + *ResourceConfig) (*InstanceDiff, error) + + // Refresh refreshes a resource and updates all of its attributes + // with the latest information. + Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error) + + /********************************************************************* + * Functions related to importing + *********************************************************************/ + + // ImportState requests that the given resource be imported. + // + // The returned InstanceState only requires ID be set. Importing + // will always call Refresh after the state to complete it. 
+ // + // IMPORTANT: InstanceState doesn't have the resource type attached + // to it. A type must be specified on the state via the Ephemeral + // field on the state. + // + // This function can return multiple states. Normally, an import + // will map 1:1 to a physical resource. However, some resources map + // to multiple. For example, an AWS security group may contain many rules. + // Each rule is represented by a separate resource in Terraform, + // therefore multiple states are returned. + ImportState(*InstanceInfo, string) ([]*InstanceState, error) + + /********************************************************************* + * Functions related to data resources + *********************************************************************/ + + // ValidateDataSource is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per data source instance. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + ValidateDataSource(string, *ResourceConfig) ([]string, []error) + + // DataSources returns all of the available data sources that this + // provider implements. + DataSources() []DataSource + + // ReadDataDiff produces a diff that represents the state that will + // be produced when the given data source is read using a later call + // to ReadDataApply. + ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) + + // ReadDataApply initializes a data instance using the configuration + // in a diff produced by ReadDataDiff. 
+ ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) +} + +// ResourceProviderCloser is an interface that providers that can close +// connections that aren't needed anymore must implement. +type ResourceProviderCloser interface { + Close() error +} + +// ResourceType is a type of resource that a resource provider can manage. +type ResourceType struct { + Name string // Name of the resource, example "instance" (no provider prefix) + Importable bool // Whether this resource supports importing + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} + +// DataSource is a data source that a resource provider implements. +type DataSource struct { + Name string + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} + +// ResourceProviderFactory is a function type that creates a new instance +// of a resource provider. +type ResourceProviderFactory func() (ResourceProvider, error) + +// ResourceProviderFactoryFixed is a helper that creates a +// ResourceProviderFactory that just returns some fixed provider. 
+func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory { + return func() (ResourceProvider, error) { + return p, nil + } +} + +func ProviderHasResource(p ResourceProvider, n string) bool { + for _, rt := range p.Resources() { + if rt.Name == n { + return true + } + } + + return false +} + +func ProviderHasDataSource(p ResourceProvider, n string) bool { + for _, rt := range p.DataSources() { + if rt.Name == n { + return true + } + } + + return false +} + +const errPluginInit = ` +Plugin reinitialization required. Please run "terraform init". + +Plugins are external binaries that Terraform uses to access and manipulate +resources. The configuration provided requires plugins which can't be located, +don't satisfy the version constraints, or are otherwise incompatible. + +Terraform automatically discovers provider requirements from your +configuration, including providers used in child modules. To see the +requirements and constraints, run "terraform providers". + +%s +` diff --git a/terraform/resource_provider_mock.go b/internal/legacy/terraform/resource_provider_mock.go similarity index 100% rename from terraform/resource_provider_mock.go rename to internal/legacy/terraform/resource_provider_mock.go diff --git a/terraform/resource_provisioner.go b/internal/legacy/terraform/resource_provisioner.go similarity index 100% rename from terraform/resource_provisioner.go rename to internal/legacy/terraform/resource_provisioner.go diff --git a/terraform/resource_provisioner_mock.go b/internal/legacy/terraform/resource_provisioner_mock.go similarity index 100% rename from terraform/resource_provisioner_mock.go rename to internal/legacy/terraform/resource_provisioner_mock.go diff --git a/terraform/resource_test.go b/internal/legacy/terraform/resource_test.go similarity index 100% rename from terraform/resource_test.go rename to internal/legacy/terraform/resource_test.go diff --git a/internal/legacy/terraform/schemas.go 
b/internal/legacy/terraform/schemas.go new file mode 100644 index 000000000..15f6d5e7b --- /dev/null +++ b/internal/legacy/terraform/schemas.go @@ -0,0 +1,285 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +// Schemas is a container for various kinds of schema that Terraform needs +// during processing. +type Schemas struct { + Providers map[addrs.Provider]*ProviderSchema + Provisioners map[string]*configschema.Block +} + +// ProviderSchema returns the entire ProviderSchema object that was produced +// by the plugin for the given provider, or nil if no such schema is available. +// +// It's usually better to go use the more precise methods offered by type +// Schemas to handle this detail automatically. +func (ss *Schemas) ProviderSchema(provider addrs.Provider) *ProviderSchema { + if ss.Providers == nil { + return nil + } + return ss.Providers[provider] +} + +// ProviderConfig returns the schema for the provider configuration of the +// given provider type, or nil if no such schema is available. +func (ss *Schemas) ProviderConfig(provider addrs.Provider) *configschema.Block { + ps := ss.ProviderSchema(provider) + if ps == nil { + return nil + } + return ps.Provider +} + +// ResourceTypeConfig returns the schema for the configuration of a given +// resource type belonging to a given provider type, or nil if no such +// schema is available. +// +// In many cases the provider type is inferrable from the resource type name, +// but this is not always true because users can override the provider for +// a resource using the "provider" meta-argument. Therefore it's important to +// always pass the correct provider name, even though in many cases it feels +// redundant. 
+func (ss *Schemas) ResourceTypeConfig(provider addrs.Provider, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) { + ps := ss.ProviderSchema(provider) + if ps == nil || ps.ResourceTypes == nil { + return nil, 0 + } + return ps.SchemaForResourceType(resourceMode, resourceType) +} + +// ProvisionerConfig returns the schema for the configuration of a given +// provisioner, or nil if no such schema is available. +func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block { + return ss.Provisioners[name] +} + +// LoadSchemas searches the given configuration, state and plan (any of which +// may be nil) for constructs that have an associated schema, requests the +// necessary schemas from the given component factory (which must _not_ be nil), +// and returns a single object representing all of the necessary schemas. +// +// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing +// errors across multiple separate objects. Errors here will usually indicate +// either misbehavior on the part of one of the providers or of the provider +// protocol itself. When returned with errors, the returned schemas object is +// still valid but may be incomplete. 
+func LoadSchemas(config *configs.Config, state *states.State, components contextComponentFactory) (*Schemas, error) { + schemas := &Schemas{ + Providers: map[addrs.Provider]*ProviderSchema{}, + Provisioners: map[string]*configschema.Block{}, + } + var diags tfdiags.Diagnostics + + newDiags := loadProviderSchemas(schemas.Providers, config, state, components) + diags = diags.Append(newDiags) + newDiags = loadProvisionerSchemas(schemas.Provisioners, config, components) + diags = diags.Append(newDiags) + + return schemas, diags.Err() +} + +func loadProviderSchemas(schemas map[addrs.Provider]*ProviderSchema, config *configs.Config, state *states.State, components contextComponentFactory) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(fqn addrs.Provider) { + name := fqn.String() + + if _, exists := schemas[fqn]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", name) + provider, err := components.ResourceProvider(fqn) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[fqn] = &ProviderSchema{} + diags = diags.Append( + fmt.Errorf("Failed to instantiate provider %q to obtain schema: %s", name, err), + ) + return + } + defer func() { + provider.Close() + }() + + resp := provider.GetSchema() + if resp.Diagnostics.HasErrors() { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. 
+ schemas[fqn] = &ProviderSchema{} + diags = diags.Append( + fmt.Errorf("Failed to retrieve schema from provider %q: %s", name, resp.Diagnostics.Err()), + ) + return + } + + s := &ProviderSchema{ + Provider: resp.Provider.Block, + ResourceTypes: make(map[string]*configschema.Block), + DataSources: make(map[string]*configschema.Block), + + ResourceTypeSchemaVersions: make(map[string]uint64), + } + + if resp.Provider.Version < 0 { + // We're not using the version numbers here yet, but we'll check + // for validity anyway in case we start using them in future. + diags = diags.Append( + fmt.Errorf("invalid negative schema version provider configuration for provider %q", name), + ) + } + + for t, r := range resp.ResourceTypes { + s.ResourceTypes[t] = r.Block + s.ResourceTypeSchemaVersions[t] = uint64(r.Version) + if r.Version < 0 { + diags = diags.Append( + fmt.Errorf("invalid negative schema version for resource type %s in provider %q", t, name), + ) + } + } + + for t, d := range resp.DataSources { + s.DataSources[t] = d.Block + if d.Version < 0 { + // We're not using the version numbers here yet, but we'll check + // for validity anyway in case we start using them in future. 
+ diags = diags.Append( + fmt.Errorf("invalid negative schema version for data source %s in provider %q", t, name), + ) + } + } + + schemas[fqn] = s + + if resp.ProviderMeta.Block != nil { + s.ProviderMeta = resp.ProviderMeta.Block + } + } + + if config != nil { + for _, fqn := range config.ProviderTypes() { + ensure(fqn) + } + } + + if state != nil { + needed := providers.AddressedTypesAbs(state.ProviderAddrs()) + for _, typeAddr := range needed { + ensure(typeAddr) + } + } + + return diags +} + +func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, components contextComponentFactory) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(name string) { + if _, exists := schemas[name]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name) + provisioner, err := components.ResourceProvisioner(name) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[name] = &configschema.Block{} + diags = diags.Append( + fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %s", name, err), + ) + return + } + defer func() { + if closer, ok := provisioner.(ResourceProvisionerCloser); ok { + closer.Close() + } + }() + + resp := provisioner.GetSchema() + if resp.Diagnostics.HasErrors() { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[name] = &configschema.Block{} + diags = diags.Append( + fmt.Errorf("Failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()), + ) + return + } + + schemas[name] = resp.Provisioner + } + + if config != nil { + for _, rc := range config.Module.ManagedResources { + for _, pc := range rc.Managed.Provisioners { + ensure(pc.Type) + } + } + + // Must also visit our child modules, recursively. 
+ for _, cc := range config.Children { + childDiags := loadProvisionerSchemas(schemas, cc, components) + diags = diags.Append(childDiags) + } + } + + return diags +} + +// ProviderSchema represents the schema for a provider's own configuration +// and the configuration for some or all of its resources and data sources. +// +// The completeness of this structure depends on how it was constructed. +// When constructed for a configuration, it will generally include only +// resource types and data sources used by that configuration. +type ProviderSchema struct { + Provider *configschema.Block + ProviderMeta *configschema.Block + ResourceTypes map[string]*configschema.Block + DataSources map[string]*configschema.Block + + ResourceTypeSchemaVersions map[string]uint64 +} + +// SchemaForResourceType attempts to find a schema for the given mode and type. +// Returns nil if no such schema is available. +func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) { + switch mode { + case addrs.ManagedResourceMode: + return ps.ResourceTypes[typeName], ps.ResourceTypeSchemaVersions[typeName] + case addrs.DataResourceMode: + // Data resources don't have schema versions right now, since state is discarded for each refresh + return ps.DataSources[typeName], 0 + default: + // Shouldn't happen, because the above cases are comprehensive. + return nil, 0 + } +} + +// SchemaForResourceAddr attempts to find a schema for the mode and type from +// the given resource address. Returns nil if no such schema is available. +func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) { + return ps.SchemaForResourceType(addr.Mode, addr.Type) +} + +// ProviderSchemaRequest is used to describe to a ResourceProvider which +// aspects of schema are required, when calling the GetSchema method. 
+type ProviderSchemaRequest struct { + ResourceTypes []string + DataSources []string +} diff --git a/terraform/state.go b/internal/legacy/terraform/state.go similarity index 100% rename from terraform/state.go rename to internal/legacy/terraform/state.go diff --git a/terraform/state_filter.go b/internal/legacy/terraform/state_filter.go similarity index 100% rename from terraform/state_filter.go rename to internal/legacy/terraform/state_filter.go diff --git a/terraform/state_test.go b/internal/legacy/terraform/state_test.go similarity index 100% rename from terraform/state_test.go rename to internal/legacy/terraform/state_test.go diff --git a/terraform/state_upgrade_v1_to_v2.go b/internal/legacy/terraform/state_upgrade_v1_to_v2.go similarity index 100% rename from terraform/state_upgrade_v1_to_v2.go rename to internal/legacy/terraform/state_upgrade_v1_to_v2.go diff --git a/terraform/state_upgrade_v2_to_v3.go b/internal/legacy/terraform/state_upgrade_v2_to_v3.go similarity index 100% rename from terraform/state_upgrade_v2_to_v3.go rename to internal/legacy/terraform/state_upgrade_v2_to_v3.go diff --git a/terraform/state_v1.go b/internal/legacy/terraform/state_v1.go similarity index 100% rename from terraform/state_v1.go rename to internal/legacy/terraform/state_v1.go diff --git a/terraform/testing.go b/internal/legacy/terraform/testing.go similarity index 100% rename from terraform/testing.go rename to internal/legacy/terraform/testing.go diff --git a/internal/legacy/terraform/ui_input.go b/internal/legacy/terraform/ui_input.go new file mode 100644 index 000000000..688bcf71e --- /dev/null +++ b/internal/legacy/terraform/ui_input.go @@ -0,0 +1,32 @@ +package terraform + +import "context" + +// UIInput is the interface that must be implemented to ask for input +// from this user. This should forward the request to wherever the user +// inputs things to ask for values. 
+type UIInput interface { + Input(context.Context, *InputOpts) (string, error) +} + +// InputOpts are options for asking for input. +type InputOpts struct { + // Id is a unique ID for the question being asked that might be + // used for logging or to look up a prior answered question. + Id string + + // Query is a human-friendly question for inputting this value. + Query string + + // Description is a description about what this option is. Be wary + // that this will probably be in a terminal so split lines as you see + // necessary. + Description string + + // Default will be the value returned if no data is entered. + Default string + + // Secret should be true if we are asking for sensitive input. + // If attached to a TTY, Terraform will disable echo. + Secret bool +} diff --git a/internal/legacy/terraform/ui_input_mock.go b/internal/legacy/terraform/ui_input_mock.go new file mode 100644 index 000000000..e2d9c3848 --- /dev/null +++ b/internal/legacy/terraform/ui_input_mock.go @@ -0,0 +1,25 @@ +package terraform + +import "context" + +// MockUIInput is an implementation of UIInput that can be used for tests. 
+type MockUIInput struct { + InputCalled bool + InputOpts *InputOpts + InputReturnMap map[string]string + InputReturnString string + InputReturnError error + InputFn func(*InputOpts) (string, error) +} + +func (i *MockUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { + i.InputCalled = true + i.InputOpts = opts + if i.InputFn != nil { + return i.InputFn(opts) + } + if i.InputReturnMap != nil { + return i.InputReturnMap[opts.Id], i.InputReturnError + } + return i.InputReturnString, i.InputReturnError +} diff --git a/internal/legacy/terraform/ui_input_prefix.go b/internal/legacy/terraform/ui_input_prefix.go new file mode 100644 index 000000000..b5d32b1e8 --- /dev/null +++ b/internal/legacy/terraform/ui_input_prefix.go @@ -0,0 +1,20 @@ +package terraform + +import ( + "context" + "fmt" +) + +// PrefixUIInput is an implementation of UIInput that prefixes the ID +// with a string, allowing queries to be namespaced. +type PrefixUIInput struct { + IdPrefix string + QueryPrefix string + UIInput UIInput +} + +func (i *PrefixUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { + opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) + opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) + return i.UIInput.Input(ctx, opts) +} diff --git a/internal/legacy/terraform/ui_input_prefix_test.go b/internal/legacy/terraform/ui_input_prefix_test.go new file mode 100644 index 000000000..dff42c39c --- /dev/null +++ b/internal/legacy/terraform/ui_input_prefix_test.go @@ -0,0 +1,27 @@ +package terraform + +import ( + "context" + "testing" +) + +func TestPrefixUIInput_impl(t *testing.T) { + var _ UIInput = new(PrefixUIInput) +} + +func TestPrefixUIInput(t *testing.T) { + input := new(MockUIInput) + prefix := &PrefixUIInput{ + IdPrefix: "foo", + UIInput: input, + } + + _, err := prefix.Input(context.Background(), &InputOpts{Id: "bar"}) + if err != nil { + t.Fatalf("err: %s", err) + } + + if input.InputOpts.Id != "foo.bar" { + t.Fatalf("bad: %#v", 
input.InputOpts) + } +} diff --git a/internal/legacy/terraform/ui_output.go b/internal/legacy/terraform/ui_output.go new file mode 100644 index 000000000..84427c63d --- /dev/null +++ b/internal/legacy/terraform/ui_output.go @@ -0,0 +1,7 @@ +package terraform + +// UIOutput is the interface that must be implemented to output +// data to the end user. +type UIOutput interface { + Output(string) +} diff --git a/internal/legacy/terraform/ui_output_callback.go b/internal/legacy/terraform/ui_output_callback.go new file mode 100644 index 000000000..135a91c5f --- /dev/null +++ b/internal/legacy/terraform/ui_output_callback.go @@ -0,0 +1,9 @@ +package terraform + +type CallbackUIOutput struct { + OutputFn func(string) +} + +func (o *CallbackUIOutput) Output(v string) { + o.OutputFn(v) +} diff --git a/internal/legacy/terraform/ui_output_callback_test.go b/internal/legacy/terraform/ui_output_callback_test.go new file mode 100644 index 000000000..1dd5ccddf --- /dev/null +++ b/internal/legacy/terraform/ui_output_callback_test.go @@ -0,0 +1,9 @@ +package terraform + +import ( + "testing" +) + +func TestCallbackUIOutput_impl(t *testing.T) { + var _ UIOutput = new(CallbackUIOutput) +} diff --git a/internal/legacy/terraform/ui_output_mock.go b/internal/legacy/terraform/ui_output_mock.go new file mode 100644 index 000000000..d828c921c --- /dev/null +++ b/internal/legacy/terraform/ui_output_mock.go @@ -0,0 +1,21 @@ +package terraform + +import "sync" + +// MockUIOutput is an implementation of UIOutput that can be used for tests. 
+type MockUIOutput struct { + sync.Mutex + OutputCalled bool + OutputMessage string + OutputFn func(string) +} + +func (o *MockUIOutput) Output(v string) { + o.Lock() + defer o.Unlock() + o.OutputCalled = true + o.OutputMessage = v + if o.OutputFn != nil { + o.OutputFn(v) + } +} diff --git a/internal/legacy/terraform/ui_output_mock_test.go b/internal/legacy/terraform/ui_output_mock_test.go new file mode 100644 index 000000000..0a23c2e23 --- /dev/null +++ b/internal/legacy/terraform/ui_output_mock_test.go @@ -0,0 +1,9 @@ +package terraform + +import ( + "testing" +) + +func TestMockUIOutput(t *testing.T) { + var _ UIOutput = new(MockUIOutput) +} diff --git a/terraform/upgrade_state_v1_test.go b/internal/legacy/terraform/upgrade_state_v1_test.go similarity index 100% rename from terraform/upgrade_state_v1_test.go rename to internal/legacy/terraform/upgrade_state_v1_test.go diff --git a/terraform/upgrade_state_v2_test.go b/internal/legacy/terraform/upgrade_state_v2_test.go similarity index 100% rename from terraform/upgrade_state_v2_test.go rename to internal/legacy/terraform/upgrade_state_v2_test.go diff --git a/internal/legacy/terraform/util.go b/internal/legacy/terraform/util.go new file mode 100644 index 000000000..7966b58dd --- /dev/null +++ b/internal/legacy/terraform/util.go @@ -0,0 +1,75 @@ +package terraform + +import ( + "sort" +) + +// Semaphore is a wrapper around a channel to provide +// utility methods to clarify that we are treating the +// channel as a semaphore +type Semaphore chan struct{} + +// NewSemaphore creates a semaphore that allows up +// to a given limit of simultaneous acquisitions +func NewSemaphore(n int) Semaphore { + if n <= 0 { + panic("semaphore with limit <=0") + } + ch := make(chan struct{}, n) + return Semaphore(ch) +} + +// Acquire is used to acquire an available slot. +// Blocks until available. +func (s Semaphore) Acquire() { + s <- struct{}{} +} + +// TryAcquire is used to do a non-blocking acquire. 
+// Returns a bool indicating success +func (s Semaphore) TryAcquire() bool { + select { + case s <- struct{}{}: + return true + default: + return false + } +} + +// Release is used to return a slot. Acquire must +// be called as a pre-condition. +func (s Semaphore) Release() { + select { + case <-s: + default: + panic("release without an acquire") + } +} + +// strSliceContains checks if a given string is contained in a slice +// When anybody asks why Go needs generics, here you go. +func strSliceContains(haystack []string, needle string) bool { + for _, s := range haystack { + if s == needle { + return true + } + } + return false +} + +// deduplicate a slice of strings +func uniqueStrings(s []string) []string { + if len(s) < 2 { + return s + } + + sort.Strings(s) + result := make([]string, 1, len(s)) + result[0] = s[0] + for i := 1; i < len(s); i++ { + if s[i] != result[len(result)-1] { + result = append(result, s[i]) + } + } + return result +} diff --git a/internal/legacy/terraform/util_test.go b/internal/legacy/terraform/util_test.go new file mode 100644 index 000000000..8b3907e23 --- /dev/null +++ b/internal/legacy/terraform/util_test.go @@ -0,0 +1,91 @@ +package terraform + +import ( + "fmt" + "reflect" + "testing" + "time" +) + +func TestSemaphore(t *testing.T) { + s := NewSemaphore(2) + timer := time.AfterFunc(time.Second, func() { + panic("deadlock") + }) + defer timer.Stop() + + s.Acquire() + if !s.TryAcquire() { + t.Fatalf("should acquire") + } + if s.TryAcquire() { + t.Fatalf("should not acquire") + } + s.Release() + s.Release() + + // This release should panic + defer func() { + r := recover() + if r == nil { + t.Fatalf("should panic") + } + }() + s.Release() +} + +func TestStrSliceContains(t *testing.T) { + if strSliceContains(nil, "foo") { + t.Fatalf("Bad") + } + if strSliceContains([]string{}, "foo") { + t.Fatalf("Bad") + } + if strSliceContains([]string{"bar"}, "foo") { + t.Fatalf("Bad") + } + if !strSliceContains([]string{"bar", "foo"}, "foo") { + 
t.Fatalf("Bad") + } +} + +func TestUniqueStrings(t *testing.T) { + cases := []struct { + Input []string + Expected []string + }{ + { + []string{}, + []string{}, + }, + { + []string{"x"}, + []string{"x"}, + }, + { + []string{"a", "b", "c"}, + []string{"a", "b", "c"}, + }, + { + []string{"a", "a", "a"}, + []string{"a"}, + }, + { + []string{"a", "b", "a", "b", "a", "a"}, + []string{"a", "b"}, + }, + { + []string{"c", "b", "a", "c", "b"}, + []string{"a", "b", "c"}, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("unique-%d", i), func(t *testing.T) { + actual := uniqueStrings(tc.Input) + if !reflect.DeepEqual(tc.Expected, actual) { + t.Fatalf("Expected: %q\nGot: %q", tc.Expected, actual) + } + }) + } +} diff --git a/terraform/version.go b/internal/legacy/terraform/version.go similarity index 100% rename from terraform/version.go rename to internal/legacy/terraform/version.go diff --git a/internal/legacy/terraform/version_required.go b/internal/legacy/terraform/version_required.go new file mode 100644 index 000000000..4c9cb34a4 --- /dev/null +++ b/internal/legacy/terraform/version_required.go @@ -0,0 +1,62 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform/tfdiags" + + "github.com/hashicorp/terraform/configs" + + tfversion "github.com/hashicorp/terraform/version" +) + +// CheckCoreVersionRequirements visits each of the modules in the given +// configuration tree and verifies that any given Core version constraints +// match with the version of Terraform Core that is being used. +// +// The returned diagnostics will contain errors if any constraints do not match. +// The returned diagnostics might also return warnings, which should be +// displayed to the user. 
+func CheckCoreVersionRequirements(config *configs.Config) tfdiags.Diagnostics { + if config == nil { + return nil + } + + var diags tfdiags.Diagnostics + module := config.Module + + for _, constraint := range module.CoreVersionConstraints { + if !constraint.Required.Check(tfversion.SemVer) { + switch { + case len(config.Path) == 0: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported Terraform Core version", + Detail: fmt.Sprintf( + "This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", + tfversion.String(), + ), + Subject: constraint.DeclRange.Ptr(), + }) + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported Terraform Core version", + Detail: fmt.Sprintf( + "Module %s (from %s) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. 
Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", + config.Path, config.SourceAddr, tfversion.String(), + ), + Subject: constraint.DeclRange.Ptr(), + }) + } + } + } + + for _, c := range config.Children { + childDiags := CheckCoreVersionRequirements(c) + diags = diags.Append(childDiags) + } + + return diags +} diff --git a/internal/logging/logging.go b/internal/logging/logging.go index 970686344..6e2c948ad 100644 --- a/internal/logging/logging.go +++ b/internal/logging/logging.go @@ -45,7 +45,7 @@ func init() { logger = newHCLogger("") logWriter = logger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true}) - // setup the default std library logger to use our output + // set up the default std library logger to use our output log.SetFlags(0) log.SetPrefix("") log.SetOutput(logWriter) diff --git a/internal/logging/panic.go b/internal/logging/panic.go index 960d4f685..211a1231d 100644 --- a/internal/logging/panic.go +++ b/internal/logging/panic.go @@ -172,11 +172,22 @@ func (l *logPanicWrapper) Debug(msg string, args ...interface{}) { // output if this is the start of the traceback. An occasional false // positive shouldn't be a big deal, since this is only retrieved after an // error of some sort. - l.inPanic = l.inPanic || strings.HasPrefix(msg, "panic: ") || strings.HasPrefix(msg, "fatal error: ") + + panicPrefix := strings.HasPrefix(msg, "panic: ") || strings.HasPrefix(msg, "fatal error: ") + + l.inPanic = l.inPanic || panicPrefix if l.inPanic && l.panicRecorder != nil { l.panicRecorder(msg) } + // If we have logging turned on, we need to prevent panicwrap from seeing + // this as a core panic. This can be done by obfuscating the panic error + // line. + if panicPrefix { + colon := strings.Index(msg, ":") + msg = strings.ToUpper(msg[:colon]) + msg[colon:] + } + l.Logger.Debug(msg, args...) 
} diff --git a/internal/logging/panic_test.go b/internal/logging/panic_test.go index e83a0ba5a..d2eb7a90b 100644 --- a/internal/logging/panic_test.go +++ b/internal/logging/panic_test.go @@ -1,9 +1,12 @@ package logging import ( + "bytes" "fmt" "strings" "testing" + + "github.com/hashicorp/go-hclog" ) func TestPanicRecorder(t *testing.T) { @@ -49,3 +52,31 @@ func TestPanicLimit(t *testing.T) { } } } + +func TestLogPanicWrapper(t *testing.T) { + var buf bytes.Buffer + logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{ + Name: "test", + Level: hclog.Debug, + Output: &buf, + DisableTime: true, + }) + + wrapped := (&logPanicWrapper{ + Logger: logger, + }).Named("test") + + wrapped.Debug("panic: invalid foo of bar") + wrapped.Debug("\tstack trace") + + expected := `[DEBUG] test.test: PANIC: invalid foo of bar +[DEBUG] test.test: stack trace +` + + got := buf.String() + + if expected != got { + t.Fatalf("Expected:\n%q\nGot:\n%q", expected, got) + } + +} diff --git a/internal/provider-simple/main/main.go b/internal/provider-simple/main/main.go new file mode 100644 index 000000000..be0ad2ef4 --- /dev/null +++ b/internal/provider-simple/main/main.go @@ -0,0 +1,16 @@ +package main + +import ( + "github.com/hashicorp/terraform/internal/grpcwrap" + simple "github.com/hashicorp/terraform/internal/provider-simple" + "github.com/hashicorp/terraform/internal/tfplugin5" + "github.com/hashicorp/terraform/plugin" +) + +func main() { + plugin.Serve(&plugin.ServeOpts{ + GRPCProviderFunc: func() tfplugin5.ProviderServer { + return grpcwrap.Provider(simple.Provider()) + }, + }) +} diff --git a/internal/provider-simple/provider.go b/internal/provider-simple/provider.go new file mode 100644 index 000000000..2da9ac6c0 --- /dev/null +++ b/internal/provider-simple/provider.go @@ -0,0 +1,128 @@ +// simple provider a minimal provider implementation for testing +package simple + +import ( + "errors" + "time" + + "github.com/hashicorp/terraform/configs/configschema" + 
"github.com/hashicorp/terraform/providers" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +type simple struct { + schema providers.GetSchemaResponse +} + +func Provider() providers.Interface { + simpleResource := providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Computed: true, + Type: cty.String, + }, + "value": { + Optional: true, + Type: cty.String, + }, + }, + }, + } + + return simple{ + schema: providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: nil, + }, + ResourceTypes: map[string]providers.Schema{ + "simple_resource": simpleResource, + }, + DataSources: map[string]providers.Schema{ + "simple_resource": simpleResource, + }, + }, + } +} + +func (s simple) GetSchema() providers.GetSchemaResponse { + return s.schema +} + +func (s simple) PrepareProviderConfig(req providers.PrepareProviderConfigRequest) (resp providers.PrepareProviderConfigResponse) { + return resp +} + +func (s simple) ValidateResourceTypeConfig(req providers.ValidateResourceTypeConfigRequest) (resp providers.ValidateResourceTypeConfigResponse) { + return resp +} + +func (s simple) ValidateDataSourceConfig(req providers.ValidateDataSourceConfigRequest) (resp providers.ValidateDataSourceConfigResponse) { + return resp +} + +func (p simple) UpgradeResourceState(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + ty := p.schema.ResourceTypes[req.TypeName].Block.ImpliedType() + val, err := ctyjson.Unmarshal(req.RawStateJSON, ty) + resp.Diagnostics = resp.Diagnostics.Append(err) + resp.UpgradedState = val + return resp +} + +func (s simple) Configure(providers.ConfigureRequest) (resp providers.ConfigureResponse) { + return resp +} + +func (s simple) Stop() error { + return nil +} + +func (s simple) ReadResource(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + // just return the same state we received + resp.NewState = 
req.PriorState + return resp +} + +func (s simple) PlanResourceChange(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + m := req.ProposedNewState.AsValueMap() + _, ok := m["id"] + if !ok { + m["id"] = cty.UnknownVal(cty.String) + } + + resp.PlannedState = cty.ObjectVal(m) + return resp +} + +func (s simple) ApplyResourceChange(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + if req.PlannedState.IsNull() { + resp.NewState = req.PlannedState + return resp + } + + m := req.PlannedState.AsValueMap() + _, ok := m["id"] + if !ok { + m["id"] = cty.StringVal(time.Now().String()) + } + resp.NewState = cty.ObjectVal(m) + + return resp +} + +func (s simple) ImportResourceState(providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("unsupported")) + return resp +} + +func (s simple) ReadDataSource(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + m := req.Config.AsValueMap() + m["id"] = cty.StringVal("static_id") + resp.State = cty.ObjectVal(m) + return resp +} + +func (s simple) Close() error { + return nil +} diff --git a/internal/provider-terraform/main/main.go b/internal/provider-terraform/main/main.go new file mode 100644 index 000000000..a8ad4bd49 --- /dev/null +++ b/internal/provider-terraform/main/main.go @@ -0,0 +1,17 @@ +package main + +import ( + "github.com/hashicorp/terraform/builtin/providers/terraform" + "github.com/hashicorp/terraform/internal/grpcwrap" + "github.com/hashicorp/terraform/internal/tfplugin5" + "github.com/hashicorp/terraform/plugin" +) + +func main() { + // Provide a binary version of the internal terraform provider for testing + plugin.Serve(&plugin.ServeOpts{ + GRPCProviderFunc: func() tfplugin5.ProviderServer { + return grpcwrap.Provider(terraform.NewProvider()) + }, + }) +} diff --git a/internal/providercache/installer.go 
b/internal/providercache/installer.go index 0fb8eeae9..6f1a1fd3a 100644 --- a/internal/providercache/installer.go +++ b/internal/providercache/installer.go @@ -65,6 +65,12 @@ func NewInstaller(targetDir *Dir, source getproviders.Source) *Installer { } } +// ProviderSource returns the getproviders.Source that the installer would +// use for installing any new providers. +func (i *Installer) ProviderSource() getproviders.Source { + return i.source +} + // SetGlobalCacheDir activates a second tier of caching for the receiving // installer, with the given directory used as a read-through cache for // installation operations that need to retrieve new packages. @@ -304,6 +310,18 @@ NeedProvider: preferredHashes = lock.PreferredHashes() } + // If our target directory already has the provider version that fulfills the lock file, carry on + if installed := i.targetDir.ProviderVersion(provider, version); installed != nil { + if len(preferredHashes) > 0 { + if matches, _ := installed.MatchesAnyHash(preferredHashes); matches { + if cb := evts.ProviderAlreadyInstalled; cb != nil { + cb(provider, version) + } + continue + } + } + } + if i.globalCacheDir != nil { // Step 3a: If our global cache already has this version available then // we'll just link it in. @@ -385,7 +403,7 @@ NeedProvider: // implementation, so we don't worry about potentially // creating a duplicate here. newHashes = append(newHashes, newHash) - lock = locks.SetProvider(provider, version, reqs[provider], newHashes) + locks.SetProvider(provider, version, reqs[provider], newHashes) if cb := evts.LinkFromCacheSuccess; cb != nil { cb(provider, version, new.PackageDir) @@ -511,7 +529,7 @@ NeedProvider: // and so the hashes would cover only the current platform. newHashes = append(newHashes, meta.AcceptableHashes()...) 
} - lock = locks.SetProvider(provider, version, reqs[provider], newHashes) + locks.SetProvider(provider, version, reqs[provider], newHashes) if cb := evts.FetchPackageSuccess; cb != nil { cb(provider, version, new.PackageDir, authResult) @@ -576,5 +594,5 @@ func (err InstallerError) Error() string { providerErr := err.ProviderErrors[addr] fmt.Fprintf(&b, "- %s: %s\n", addr, providerErr) } - return b.String() + return strings.TrimSpace(b.String()) } diff --git a/internal/providercache/installer_events_test.go b/internal/providercache/installer_events_test.go new file mode 100644 index 000000000..ab7032630 --- /dev/null +++ b/internal/providercache/installer_events_test.go @@ -0,0 +1,184 @@ +package providercache + +import ( + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/internal/getproviders" +) + +type testInstallerEventLogItem struct { + // The name of the event that occurred, using the same names as the + // fields of InstallerEvents. + Event string + + // Most events relate to a specific provider. For the few event types + // that don't, this will be a zero-value Provider. + Provider addrs.Provider + + // The type of Args will vary by event, but it should always be something + // that can be deterministically compared using the go-cmp package. + Args interface{} +} + +// installerLogEventsForTests is a test helper that produces an InstallerEvents +// that writes event notifications (*testInstallerEventLogItem values) to +// the given channel as they occur. +// +// The caller must keep reading from the read side of the given channel +// throughout any installer operation using the returned InstallerEvents. +// It's the caller's responsibility to close the channel if needed and +// clean up any goroutines it started to process the events. +// +// The exact sequence of events emitted for an installer operation might +// change in future, if e.g. we introduce new event callbacks to the +// InstallerEvents struct. 
Tests using this mechanism may therefore need to +// be updated to reflect such changes. +// +// (The channel-based approach here is so that the control flow for event +// processing will belong to the caller and thus it can safely use its +// testing.T object(s) to emit log lines without non-test-case frames in the +// call stack.) +func installerLogEventsForTests(into chan<- *testInstallerEventLogItem) *InstallerEvents { + return &InstallerEvents{ + PendingProviders: func(reqs map[addrs.Provider]getproviders.VersionConstraints) { + into <- &testInstallerEventLogItem{ + Event: "PendingProviders", + Args: reqs, + } + }, + ProviderAlreadyInstalled: func(provider addrs.Provider, selectedVersion getproviders.Version) { + into <- &testInstallerEventLogItem{ + Event: "ProviderAlreadyInstalled", + Provider: provider, + Args: selectedVersion, + } + }, + BuiltInProviderAvailable: func(provider addrs.Provider) { + into <- &testInstallerEventLogItem{ + Event: "BuiltInProviderAvailable", + Provider: provider, + } + }, + BuiltInProviderFailure: func(provider addrs.Provider, err error) { + into <- &testInstallerEventLogItem{ + Event: "BuiltInProviderFailure", + Provider: provider, + Args: err.Error(), // stringified to guarantee cmp-ability + } + }, + QueryPackagesBegin: func(provider addrs.Provider, versionConstraints getproviders.VersionConstraints, locked bool) { + into <- &testInstallerEventLogItem{ + Event: "QueryPackagesBegin", + Provider: provider, + Args: struct { + Constraints string + Locked bool + }{getproviders.VersionConstraintsString(versionConstraints), locked}, + } + }, + QueryPackagesSuccess: func(provider addrs.Provider, selectedVersion getproviders.Version) { + into <- &testInstallerEventLogItem{ + Event: "QueryPackagesSuccess", + Provider: provider, + Args: selectedVersion.String(), + } + }, + QueryPackagesFailure: func(provider addrs.Provider, err error) { + into <- &testInstallerEventLogItem{ + Event: "QueryPackagesFailure", + Provider: provider, + Args: 
err.Error(), // stringified to guarantee cmp-ability + } + }, + QueryPackagesWarning: func(provider addrs.Provider, warns []string) { + into <- &testInstallerEventLogItem{ + Event: "QueryPackagesWarning", + Provider: provider, + Args: warns, + } + }, + LinkFromCacheBegin: func(provider addrs.Provider, version getproviders.Version, cacheRoot string) { + into <- &testInstallerEventLogItem{ + Event: "LinkFromCacheBegin", + Provider: provider, + Args: struct { + Version string + CacheRoot string + }{version.String(), cacheRoot}, + } + }, + LinkFromCacheSuccess: func(provider addrs.Provider, version getproviders.Version, localDir string) { + into <- &testInstallerEventLogItem{ + Event: "LinkFromCacheSuccess", + Provider: provider, + Args: struct { + Version string + LocalDir string + }{version.String(), localDir}, + } + }, + LinkFromCacheFailure: func(provider addrs.Provider, version getproviders.Version, err error) { + into <- &testInstallerEventLogItem{ + Event: "LinkFromCacheFailure", + Provider: provider, + Args: struct { + Version string + Error string + }{version.String(), err.Error()}, + } + }, + FetchPackageMeta: func(provider addrs.Provider, version getproviders.Version) { + into <- &testInstallerEventLogItem{ + Event: "FetchPackageMeta", + Provider: provider, + Args: version.String(), + } + }, + FetchPackageBegin: func(provider addrs.Provider, version getproviders.Version, location getproviders.PackageLocation) { + into <- &testInstallerEventLogItem{ + Event: "FetchPackageBegin", + Provider: provider, + Args: struct { + Version string + Location getproviders.PackageLocation + }{version.String(), location}, + } + }, + FetchPackageSuccess: func(provider addrs.Provider, version getproviders.Version, localDir string, authResult *getproviders.PackageAuthenticationResult) { + into <- &testInstallerEventLogItem{ + Event: "FetchPackageSuccess", + Provider: provider, + Args: struct { + Version string + LocalDir string + AuthResult string + }{version.String(), localDir, 
authResult.String()}, + } + }, + FetchPackageFailure: func(provider addrs.Provider, version getproviders.Version, err error) { + into <- &testInstallerEventLogItem{ + Event: "FetchPackageFailure", + Provider: provider, + Args: struct { + Version string + Error string + }{version.String(), err.Error()}, + } + }, + ProvidersFetched: func(authResults map[addrs.Provider]*getproviders.PackageAuthenticationResult) { + into <- &testInstallerEventLogItem{ + Event: "ProvidersFetched", + Args: authResults, + } + }, + HashPackageFailure: func(provider addrs.Provider, version getproviders.Version, err error) { + into <- &testInstallerEventLogItem{ + Event: "HashPackageFailure", + Provider: provider, + Args: struct { + Version string + Error string + }{version.String(), err.Error()}, + } + }, + } +} diff --git a/internal/providercache/installer_test.go b/internal/providercache/installer_test.go index 9e338fa74..2063fc1ea 100644 --- a/internal/providercache/installer_test.go +++ b/internal/providercache/installer_test.go @@ -8,9 +8,13 @@ import ( "net/http" "net/http/httptest" "os" + "path/filepath" "strings" "testing" + "github.com/apparentlymart/go-versions/versions" + "github.com/apparentlymart/go-versions/versions/constraints" + "github.com/davecgh/go-spew/spew" "github.com/google/go-cmp/cmp" svchost "github.com/hashicorp/terraform-svchost" "github.com/hashicorp/terraform-svchost/disco" @@ -19,6 +23,1297 @@ import ( "github.com/hashicorp/terraform/internal/getproviders" ) +func TestEnsureProviderVersions(t *testing.T) { + // This is a sort of hybrid between table-driven and imperative-style + // testing, because the overall sequence of steps is the same for all + // of the test cases but the setup and verification have enough different + // permutations that it ends up being more concise to express them as + // normal code. 
+ type Test struct { + Source getproviders.Source + Prepare func(*testing.T, *Installer, *Dir) + LockFile string + Reqs getproviders.Requirements + Mode InstallMode + Check func(*testing.T, *Dir, *depsfile.Locks) + WantErr string + WantEvents func(*Installer, *Dir) map[addrs.Provider][]*testInstallerEventLogItem + } + + // noProvider is just the zero value of addrs.Provider, which we're + // using in this test as the key for installer events that are not + // specific to a particular provider. + var noProvider addrs.Provider + beepProvider := addrs.MustParseProviderSourceString("example.com/foo/beep") + beepProviderDir := getproviders.PackageLocalDir("testdata/beep-provider") + fakePlatform := getproviders.Platform{OS: "bleep", Arch: "bloop"} + wrongPlatform := getproviders.Platform{OS: "wrong", Arch: "wrong"} + beepProviderHash := getproviders.HashScheme1.New("2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=") + terraformProvider := addrs.MustParseProviderSourceString("terraform.io/builtin/terraform") + + tests := map[string]Test{ + "no dependencies": { + Mode: InstallNewProvidersOnly, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 0 { + t.Errorf("unexpected cache directory entries\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 0 { + t.Errorf("unexpected provider lock entries\n%s", spew.Sdump(allLocked)) + } + }, + WantEvents: func(*Installer, *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints(nil), + }, + }, + } + }, + }, + "successful initial install of one provider": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, 
+ }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 1 { + t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("2.1.0"), + getproviders.MustParseVersionConstraints(">= 2.0.0"), + []getproviders.Hash{beepProviderHash}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + { + Event: "ProvidersFetched", + Args: 
map[addrs.Provider]*getproviders.PackageAuthenticationResult{ + beepProvider: nil, + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", false}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "FetchPackageMeta", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "FetchPackageBegin", + Provider: beepProvider, + Args: struct { + Version string + Location getproviders.PackageLocation + }{"2.1.0", beepProviderDir}, + }, + { + Event: "FetchPackageSuccess", + Provider: beepProvider, + Args: struct { + Version string + LocalDir string + AuthResult string + }{ + "2.1.0", + filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + "unauthenticated", + }, + }, + }, + } + }, + }, + "successful initial install of one provider through a cold global cache": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + Prepare: func(t *testing.T, inst *Installer, dir *Dir) { + globalCacheDirPath := tmpDir(t) + globalCacheDir := NewDirWithPlatform(globalCacheDirPath, fakePlatform) + inst.SetGlobalCacheDir(globalCacheDir) + }, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 1 { + t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number 
of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("2.1.0"), + getproviders.MustParseVersionConstraints(">= 2.0.0"), + []getproviders.Hash{beepProviderHash}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + { + Event: "ProvidersFetched", + Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{ + beepProvider: nil, + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", false}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "FetchPackageMeta", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "FetchPackageBegin", + Provider: beepProvider, + Args: struct { + Version string + Location getproviders.PackageLocation + }{"2.1.0", beepProviderDir}, + }, + { + Event: "FetchPackageSuccess", + Provider: beepProvider, + Args: struct { + Version string + LocalDir string + AuthResult string + }{ + "2.1.0", + // NOTE: With global cache enabled, the fetch + // goes 
into the global cache dir and + // we then to it from the local cache dir. + filepath.Join(inst.globalCacheDir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + "unauthenticated", + }, + }, + }, + } + }, + }, + "successful initial install of one provider through a warm global cache": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + Prepare: func(t *testing.T, inst *Installer, dir *Dir) { + globalCacheDirPath := tmpDir(t) + globalCacheDir := NewDirWithPlatform(globalCacheDirPath, fakePlatform) + _, err := globalCacheDir.InstallPackage( + context.Background(), + getproviders.PackageMeta{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + nil, + ) + if err != nil { + t.Fatalf("failed to populate global cache: %s", err) + } + inst.SetGlobalCacheDir(globalCacheDir) + }, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 1 { + t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("2.1.0"), + getproviders.MustParseVersionConstraints(">= 2.0.0"), + []getproviders.Hash{beepProviderHash}, + ) + if diff 
:= cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", false}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "LinkFromCacheBegin", + Provider: beepProvider, + Args: struct { + Version string + CacheRoot string + }{ + "2.1.0", + inst.globalCacheDir.BasePath(), + }, + }, + { + Event: "LinkFromCacheSuccess", + Provider: beepProvider, + Args: struct { + Version string + LocalDir string + }{ + "2.1.0", + filepath.Join(dir.BasePath(), "/example.com/foo/beep/2.1.0/bleep_bloop"), + }, + }, + }, + } + }, + }, + "successful reinstall of one previously-locked provider": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + 
TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + provider "example.com/foo/beep" { + version = "2.0.0" + constraints = ">= 2.0.0" + hashes = [ + "h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=", + ] + } + `, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 1 { + t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("2.0.0"), + getproviders.MustParseVersionConstraints(">= 2.0.0"), + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.0.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + { + Event: "ProvidersFetched", + Args: 
map[addrs.Provider]*getproviders.PackageAuthenticationResult{ + beepProvider: nil, + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", true}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "2.0.0", + }, + { + Event: "FetchPackageMeta", + Provider: beepProvider, + Args: "2.0.0", + }, + { + Event: "FetchPackageBegin", + Provider: beepProvider, + Args: struct { + Version string + Location getproviders.PackageLocation + }{"2.0.0", beepProviderDir}, + }, + { + Event: "FetchPackageSuccess", + Provider: beepProvider, + Args: struct { + Version string + LocalDir string + AuthResult string + }{ + "2.0.0", + filepath.Join(dir.BasePath(), "example.com/foo/beep/2.0.0/bleep_bloop"), + "unauthenticated", + }, + }, + }, + } + }, + }, + "skipped install of one previously-locked and installed provider": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + provider "example.com/foo/beep" { + version = "2.0.0" + constraints = ">= 2.0.0" + hashes = [ + "h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=", + ] + } + `, + Prepare: func(t *testing.T, inst *Installer, dir *Dir) { + _, err := dir.InstallPackage( + context.Background(), + getproviders.PackageMeta{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + nil, + ) + if err != nil { + t.Fatalf("installation to the test dir failed: %s", err) + } + }, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 
1 { + t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("2.0.0"), + getproviders.MustParseVersionConstraints(">= 2.0.0"), + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.0.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", true}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "2.0.0", + }, + { + Event: "ProviderAlreadyInstalled", + Provider: beepProvider, + Args: versions.Version{Major: 2, Minor: 0, Patch: 0}, + }, + }, + } + }, + }, + "successful upgrade of one previously-locked provider": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + 
TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + provider "example.com/foo/beep" { + version = "2.0.0" + constraints = ">= 2.0.0" + hashes = [ + "h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=", + ] + } + `, + Mode: InstallUpgrades, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 1 { + t.Errorf("wrong number of cache directory entries; want only one\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("2.1.0"), + getproviders.MustParseVersionConstraints(">= 2.0.0"), + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + + gotEntry := dir.ProviderLatestVersion(beepProvider) + wantEntry := &CachedProvider{ + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.1.0"), + PackageDir: filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + } + if diff := cmp.Diff(wantEntry, gotEntry); diff != "" { + t.Errorf("wrong cache entry\n%s", diff) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return 
map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + { + Event: "ProvidersFetched", + Args: map[addrs.Provider]*getproviders.PackageAuthenticationResult{ + beepProvider: nil, + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", false}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "FetchPackageMeta", + Provider: beepProvider, + Args: "2.1.0", + }, + { + Event: "FetchPackageBegin", + Provider: beepProvider, + Args: struct { + Version string + Location getproviders.PackageLocation + }{"2.1.0", beepProviderDir}, + }, + { + Event: "FetchPackageSuccess", + Provider: beepProvider, + Args: struct { + Version string + LocalDir string + AuthResult string + }{ + "2.1.0", + filepath.Join(dir.BasePath(), "example.com/foo/beep/2.1.0/bleep_bloop"), + "unauthenticated", + }, + }, + }, + } + }, + }, + "successful install of a built-in provider": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{}, + nil, + ), + Prepare: func(t *testing.T, inst *Installer, dir *Dir) { + inst.SetBuiltInProviderTypes([]string{"terraform"}) + }, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + terraformProvider: nil, + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + // Built-in providers are neither included in the cache + // directory nor mentioned in the lock file, because they + // are compiled directly into the Terraform executable. 
+ if allCached := dir.AllAvailablePackages(); len(allCached) != 0 { + t.Errorf("wrong number of cache directory entries; want none\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 0 { + t.Errorf("wrong number of provider lock entries; want none\n%s", spew.Sdump(allLocked)) + } + }, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + terraformProvider: constraints.IntersectionSpec(nil), + }, + }, + }, + terraformProvider: { + { + Event: "BuiltInProviderAvailable", + Provider: terraformProvider, + }, + }, + } + }, + }, + "failed install of a non-existing built-in provider": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{}, + nil, + ), + Prepare: func(t *testing.T, inst *Installer, dir *Dir) { + // NOTE: We're intentionally not calling + // inst.SetBuiltInProviderTypes to make the "terraform" + // built-in provider available here, so requests for it + // should fail. 
+ }, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + terraformProvider: nil, + }, + WantErr: `some providers could not be installed: +- terraform.io/builtin/terraform: this Terraform release has no built-in provider named "terraform"`, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + terraformProvider: constraints.IntersectionSpec(nil), + }, + }, + }, + terraformProvider: { + { + Event: "BuiltInProviderFailure", + Provider: terraformProvider, + Args: `this Terraform release has no built-in provider named "terraform"`, + }, + }, + } + }, + }, + "failed install when a built-in provider has a version constraint": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{}, + nil, + ), + Prepare: func(t *testing.T, inst *Installer, dir *Dir) { + inst.SetBuiltInProviderTypes([]string{"terraform"}) + }, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + terraformProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + WantErr: `some providers could not be installed: +- terraform.io/builtin/terraform: built-in providers do not support explicit version constraints`, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + terraformProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + }, + }, + terraformProvider: { + { + Event: "BuiltInProviderFailure", + Provider: terraformProvider, + Args: `built-in providers do not support explicit version constraints`, + }, + }, + } + }, + }, + "locked version is excluded by new version constraint": { + Source: getproviders.NewMockSource( 
+ []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + provider "example.com/foo/beep" { + version = "1.0.0" + constraints = ">= 1.0.0" + hashes = [ + "h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=", + ] + } + `, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 0 { + t.Errorf("wrong number of cache directory entries; want none\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("1.0.0"), + getproviders.MustParseVersionConstraints(">= 1.0.0"), + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + t.Errorf("wrong lock entry\n%s", diff) + } + }, + WantErr: `some providers could not be installed: +- example.com/foo/beep: locked provider example.com/foo/beep 1.0.0 does not match configured version constraint >= 2.0.0; must use terraform init -upgrade to allow selection of new versions`, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: 
getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", true}, + }, + { + Event: "QueryPackagesFailure", + Provider: beepProvider, + Args: `locked provider example.com/foo/beep 1.0.0 does not match configured version constraint >= 2.0.0; must use terraform init -upgrade to allow selection of new versions`, + }, + }, + } + }, + }, + "locked version is no longer available": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("2.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + provider "example.com/foo/beep" { + version = "1.2.0" + constraints = ">= 1.0.0" + hashes = [ + "h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84=", + ] + } + `, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + Check: func(t *testing.T, dir *Dir, locks *depsfile.Locks) { + if allCached := dir.AllAvailablePackages(); len(allCached) != 0 { + t.Errorf("wrong number of cache directory entries; want none\n%s", spew.Sdump(allCached)) + } + if allLocked := locks.AllProviders(); len(allLocked) != 1 { + t.Errorf("wrong number of provider lock entries; want only one\n%s", spew.Sdump(allLocked)) + } + + gotLock := locks.Provider(beepProvider) + wantLock := depsfile.NewProviderLock( + beepProvider, + getproviders.MustParseVersion("1.2.0"), + getproviders.MustParseVersionConstraints(">= 1.0.0"), + []getproviders.Hash{"h1:2y06Ykj0FRneZfGCTxI9wRTori8iB7ZL5kQ6YyEnh84="}, + ) + if diff := cmp.Diff(wantLock, gotLock, depsfile.ProviderLockComparer); diff != "" { + 
t.Errorf("wrong lock entry\n%s", diff) + } + }, + WantErr: `some providers could not be installed: +- example.com/foo/beep: the previously-selected version 1.2.0 is no longer available`, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 1.0.0", true}, + }, + { + Event: "QueryPackagesFailure", + Provider: beepProvider, + Args: `the previously-selected version 1.2.0 is no longer available`, + }, + }, + } + }, + }, + "no versions match the version constraint": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + WantErr: `some providers could not be installed: +- example.com/foo/beep: no available releases match the given constraints >= 2.0.0`, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 2.0.0"), + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 2.0.0", false}, + }, + { + Event: "QueryPackagesFailure", + Provider: beepProvider, + Args: `no 
available releases match the given constraints >= 2.0.0`, + }, + }, + } + }, + }, + "version exists but doesn't support the current platform": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: wrongPlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + WantErr: `some providers could not be installed: +- example.com/foo/beep: provider example.com/foo/beep 1.0.0 is not available for bleep_bloop`, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 1.0.0", false}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "1.0.0", + }, + { + Event: "FetchPackageMeta", + Provider: beepProvider, + Args: "1.0.0", + }, + { + Event: "FetchPackageFailure", + Provider: beepProvider, + Args: struct { + Version string + Error string + }{ + "1.0.0", + "provider example.com/foo/beep 1.0.0 is not available for bleep_bloop", + }, + }, + }, + } + }, + }, + "available package doesn't match locked hash": { + Source: getproviders.NewMockSource( + []getproviders.PackageMeta{ + { + Provider: beepProvider, + Version: getproviders.MustParseVersion("1.0.0"), + TargetPlatform: fakePlatform, + Location: beepProviderDir, + }, + }, + nil, + ), + LockFile: ` + provider "example.com/foo/beep" { + version = "1.0.0" + constraints = ">= 1.0.0" + hashes = [ + "h1:does-not-match", + ] + } + 
`, + Mode: InstallNewProvidersOnly, + Reqs: getproviders.Requirements{ + beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + WantErr: `some providers could not be installed: +- example.com/foo/beep: the local package for example.com/foo/beep 1.0.0 doesn't match any of the checksums previously recorded in the dependency lock file (this might be because the available checksums are for packages targeting different platforms)`, + WantEvents: func(inst *Installer, dir *Dir) map[addrs.Provider][]*testInstallerEventLogItem { + return map[addrs.Provider][]*testInstallerEventLogItem{ + noProvider: { + { + Event: "PendingProviders", + Args: map[addrs.Provider]getproviders.VersionConstraints{ + beepProvider: getproviders.MustParseVersionConstraints(">= 1.0.0"), + }, + }, + }, + beepProvider: { + { + Event: "QueryPackagesBegin", + Provider: beepProvider, + Args: struct { + Constraints string + Locked bool + }{">= 1.0.0", true}, + }, + { + Event: "QueryPackagesSuccess", + Provider: beepProvider, + Args: "1.0.0", + }, + { + Event: "FetchPackageMeta", + Provider: beepProvider, + Args: "1.0.0", + }, + { + Event: "FetchPackageBegin", + Provider: beepProvider, + Args: struct { + Version string + Location getproviders.PackageLocation + }{"1.0.0", beepProviderDir}, + }, + { + Event: "FetchPackageFailure", + Provider: beepProvider, + Args: struct { + Version string + Error string + }{ + "1.0.0", + `the local package for example.com/foo/beep 1.0.0 doesn't match any of the checksums previously recorded in the dependency lock file (this might be because the available checksums are for packages targeting different platforms)`, + }, + }, + }, + } + }, + }, + } + + ctx := context.Background() + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + if test.Check == nil && test.WantEvents == nil && test.WantErr == "" { + t.Fatalf("invalid test: must set at least one of Check, WantEvents, or WantErr") + } + + outputDir := NewDirWithPlatform(tmpDir(t), 
fakePlatform) + source := test.Source + if source == nil { + source = getproviders.NewMockSource(nil, nil) + } + inst := NewInstaller(outputDir, source) + if test.Prepare != nil { + test.Prepare(t, inst, outputDir) + } + + locks, lockDiags := depsfile.LoadLocksFromBytes([]byte(test.LockFile), "test.lock.hcl") + if lockDiags.HasErrors() { + t.Fatalf("invalid lock file: %s", lockDiags.Err().Error()) + } + + providerEvents := make(map[addrs.Provider][]*testInstallerEventLogItem) + eventsCh := make(chan *testInstallerEventLogItem) + var newLocks *depsfile.Locks + var instErr error + go func(ch chan *testInstallerEventLogItem) { + events := installerLogEventsForTests(ch) + ctx := events.OnContext(ctx) + newLocks, instErr = inst.EnsureProviderVersions(ctx, locks, test.Reqs, test.Mode) + close(eventsCh) // exits the event loop below + }(eventsCh) + for evt := range eventsCh { + // We do the event collection in the main goroutine, rather than + // running the installer itself in the main goroutine, so that + // we can safely t.Log in here without violating the testing.T + // usage rules. 
+ if evt.Provider == (addrs.Provider{}) { + t.Logf("%s(%s)", evt.Event, spew.Sdump(evt.Args)) + } else { + t.Logf("%s: %s(%s)", evt.Provider, evt.Event, spew.Sdump(evt.Args)) + } + providerEvents[evt.Provider] = append(providerEvents[evt.Provider], evt) + } + + if test.WantErr != "" { + if instErr == nil { + t.Errorf("succeeded; want error\nwant: %s", test.WantErr) + } else if got, want := instErr.Error(), test.WantErr; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + } else if instErr != nil { + t.Errorf("unexpected error\ngot: %s", instErr.Error()) + } + + if test.Check != nil { + test.Check(t, outputDir, newLocks) + } + + if test.WantEvents != nil { + wantEvents := test.WantEvents(inst, outputDir) + if diff := cmp.Diff(wantEvents, providerEvents); diff != "" { + t.Errorf("wrong installer events\n%s", diff) + } + } + }) + } +} + func TestEnsureProviderVersions_local_source(t *testing.T) { // create filesystem source using the test provider cache dir source := getproviders.NewFilesystemMirrorSource("testdata/cachedir") @@ -437,3 +1732,14 @@ func fakeRegistryHandler(resp http.ResponseWriter, req *http.Request) { resp.WriteHeader(404) resp.Write([]byte(`unrecognized path scheme`)) } + +// In order to be able to compare the recorded temp dir paths, we need to +// normalize the path to match what the installer would report. 
+func tmpDir(t *testing.T) string { + d := t.TempDir() + unlinked, err := filepath.EvalSymlinks(d) + if err != nil { + t.Fatal(err) + } + return filepath.Clean(unlinked) +} diff --git a/internal/providercache/package_install.go b/internal/providercache/package_install.go index ca8a3073d..57b388888 100644 --- a/internal/providercache/package_install.go +++ b/internal/providercache/package_install.go @@ -12,7 +12,6 @@ import ( "github.com/hashicorp/terraform/httpclient" "github.com/hashicorp/terraform/internal/copy" - copydir "github.com/hashicorp/terraform/internal/copy" "github.com/hashicorp/terraform/internal/getproviders" ) @@ -126,7 +125,7 @@ func installFromLocalArchive(ctx context.Context, meta getproviders.PackageMeta, filename := meta.Location.String() - err := unzip.Decompress(targetDir, filename, true) + err := unzip.Decompress(targetDir, filename, true, 0000) if err != nil { return authResult, err } @@ -154,12 +153,58 @@ func installFromLocalDir(ctx context.Context, meta getproviders.PackageMeta, tar // these two paths are not pointing at the same physical directory on // disk. This compares the files by their OS-level device and directory // entry identifiers, not by their virtual filesystem paths. - if same, err := copydir.SameFile(absNew, absCurrent); same { + if same, err := copy.SameFile(absNew, absCurrent); same { return nil, fmt.Errorf("cannot install existing provider directory %s to itself", targetDir) } else if err != nil { return nil, fmt.Errorf("failed to determine if %s and %s are the same: %s", sourceDir, targetDir, err) } + var authResult *getproviders.PackageAuthenticationResult + if meta.Authentication != nil { + // (we have this here for completeness but note that local filesystem + // mirrors typically don't include enough information for package + // authentication and so we'll rarely get in here in practice.) 
+ var err error + if authResult, err = meta.Authentication.AuthenticatePackage(meta.Location); err != nil { + return nil, err + } + } + + // If the caller provided at least one hash in allowedHashes then at + // least one of those hashes ought to match. However, for local directories + // in particular we can't actually verify the legacy "zh:" hash scheme + // because it requires access to the original .zip archive, and so as a + // measure of pragmatism we'll treat a set of hashes where all are "zh:" + // the same as no hashes at all, and let anything pass. This is definitely + // non-ideal but accepted for two reasons: + // - Packages we find on local disk can be considered a little more trusted + // than packages coming from over the network, because we assume that + // they were either placed intentionally by an operator or they were + // automatically installed by a previous network operation that would've + // itself verified the hashes. + // - Our installer makes a concerted effort to record at least one new-style + // hash for each lock entry, so we should very rarely end up in this + // situation anyway. + suitableHashCount := 0 + for _, hash := range allowedHashes { + if !hash.HasScheme(getproviders.HashSchemeZip) { + suitableHashCount++ + } + } + if suitableHashCount > 0 { + if matches, err := meta.MatchesAnyHash(allowedHashes); err != nil { + return authResult, fmt.Errorf( + "failed to calculate checksum for %s %s package at %s: %s", + meta.Provider, meta.Version, meta.Location, err, + ) + } else if !matches { + return authResult, fmt.Errorf( + "the local package for %s %s doesn't match any of the checksums previously recorded in the dependency lock file (this might be because the available checksums are for packages targeting different platforms)", + meta.Provider, meta.Version, + ) + } + } + // Delete anything that's already present at this path first. 
err = os.RemoveAll(targetDir) if err != nil && !os.IsNotExist(err) { diff --git a/internal/providercache/testdata/beep-provider/terraform-provider-beep b/internal/providercache/testdata/beep-provider/terraform-provider-beep new file mode 100644 index 000000000..e0841fd8c --- /dev/null +++ b/internal/providercache/testdata/beep-provider/terraform-provider-beep @@ -0,0 +1,2 @@ +This is not a real provider executable. It's just here to give the installer +something to copy in some of our installer test cases. diff --git a/internal/provisioner-local-exec/main/main.go b/internal/provisioner-local-exec/main/main.go new file mode 100644 index 000000000..86a6f07fc --- /dev/null +++ b/internal/provisioner-local-exec/main/main.go @@ -0,0 +1,17 @@ +package main + +import ( + localexec "github.com/hashicorp/terraform/builtin/provisioners/local-exec" + "github.com/hashicorp/terraform/internal/grpcwrap" + "github.com/hashicorp/terraform/internal/tfplugin5" + "github.com/hashicorp/terraform/plugin" +) + +func main() { + // Provide a binary version of the internal terraform provider for testing + plugin.Serve(&plugin.ServeOpts{ + GRPCProvisionerFunc: func() tfplugin5.ProvisionerServer { + return grpcwrap.Provisioner(localexec.New()) + }, + }) +} diff --git a/internal/terminal/impl_others.go b/internal/terminal/impl_others.go new file mode 100644 index 000000000..fc819aee6 --- /dev/null +++ b/internal/terminal/impl_others.go @@ -0,0 +1,53 @@ +// +build !windows + +package terminal + +import ( + "os" + + "golang.org/x/term" +) + +// This is the implementation for all operating systems except Windows, where +// we don't expect to need to do any special initialization to get a working +// Virtual Terminal. 
+// +// For this implementation we just delegate everything upstream to +// golang.org/x/term, since it already has a variety of different +// implementations for quirks of more esoteric operating systems like plan9, +// and will hopefully grow to include others as Go is ported to other platforms +// in future. +// +// For operating systems that golang.org/x/term doesn't support either, it +// defaults to indicating that nothing is a terminal and returns an error when +// asked for a size, which we'll handle below. + +func configureOutputHandle(f *os.File) (*OutputStream, error) { + return &OutputStream{ + File: f, + isTerminal: isTerminalGolangXTerm, + getColumns: getColumnsGolangXTerm, + }, nil +} + +func configureInputHandle(f *os.File) (*InputStream, error) { + return &InputStream{ + File: f, + isTerminal: isTerminalGolangXTerm, + }, nil +} + +func isTerminalGolangXTerm(f *os.File) bool { + return term.IsTerminal(int(f.Fd())) +} + +func getColumnsGolangXTerm(f *os.File) int { + width, _, err := term.GetSize(int(f.Fd())) + if err != nil { + // Suggests that it's either not a terminal at all or that we're on + // a platform that golang.org/x/term doesn't support. In both cases + // we'll just return the placeholder default value. + return defaultColumns + } + return width +} diff --git a/internal/terminal/impl_windows.go b/internal/terminal/impl_windows.go new file mode 100644 index 000000000..f6aaa78b9 --- /dev/null +++ b/internal/terminal/impl_windows.go @@ -0,0 +1,161 @@ +// +build windows + +package terminal + +import ( + "fmt" + "os" + "syscall" + + "golang.org/x/sys/windows" + + // We're continuing to use this third-party library on Windows because it + // has the additional IsCygwinTerminal function, which includes some useful + // heuristics for recognizing when a pipe seems to be connected to a + // legacy terminal emulator on Windows versions that lack true pty support. + // We now use golang.org/x/term's functionality on other platforms. 
+ isatty "github.com/mattn/go-isatty" +) + +func configureOutputHandle(f *os.File) (*OutputStream, error) { + ret := &OutputStream{ + File: f, + } + + if fd := f.Fd(); isatty.IsTerminal(fd) { + // We have a few things to deal with here: + // - Activating UTF-8 output support (mandatory) + // - Activating virtual terminal support (optional) + // These will not succeed on Windows 8 or early versions of Windows 10. + + // UTF-8 support means switching the console "code page" to CP_UTF8. + // Notice that this doesn't take the specific file descriptor, because + // the console is just ambiently associated with our process. + err := SetConsoleOutputCP(CP_UTF8) + if err != nil { + return nil, fmt.Errorf("failed to set the console to UTF-8 mode; you may need to use a newer version of Windows: %s", err) + } + + // If the console also allows us to turn on + // ENABLE_VIRTUAL_TERMINAL_PROCESSING then we can potentially use VT + // output, although the methods of Settings will make the final + // determination on that because we might have some handles pointing at + // terminals and other handles pointing at files/pipes. + ret.getColumns = getColumnsWindowsConsole + var mode uint32 + err = windows.GetConsoleMode(windows.Handle(fd), &mode) + if err != nil { + return ret, nil // We'll treat this as success but without VT support + } + mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + err = windows.SetConsoleMode(windows.Handle(fd), mode) + if err != nil { + return ret, nil // We'll treat this as success but without VT support + } + + // If we get here then we've successfully turned on VT processing, so + // we can return an OutputStream that answers true when asked if it + // is a Terminal. 
+ ret.isTerminal = staticTrue + return ret, nil + + } else if isatty.IsCygwinTerminal(fd) { + // Cygwin terminals -- and other VT100 "fakers" for older versions of + // Windows -- are not really terminals in the usual sense, but rather + // are pipes between the child process (Terraform) and the terminal + // emulator. isatty.IsCygwinTerminal uses some heuristics to + // distinguish those pipes from other pipes we might see if the user + // were, for example, using the | operator on the command line. + // If we get in here then we'll assume that we can send VT100 sequences + // to this stream, even though it isn't a terminal in the usual sense. + + ret.isTerminal = staticTrue + // TODO: Is it possible to detect the width of these fake terminals? + return ret, nil + } + + // If we fall out here then we have a non-terminal filehandle, so we'll + // just accept all of the default OutputStream behaviors + return ret, nil +} + +func configureInputHandle(f *os.File) (*InputStream, error) { + ret := &InputStream{ + File: f, + } + + if fd := f.Fd(); isatty.IsTerminal(fd) { + // We have to activate UTF-8 input, or else we fail. This will not + // succeed on Windows 8 or early versions of Windows 10. + // Notice that this doesn't take the specific file descriptor, because + // the console is just ambiently associated with our process. + err := SetConsoleCP(CP_UTF8) + if err != nil { + return nil, fmt.Errorf("failed to set the console to UTF-8 mode; you may need to use a newer version of Windows: %s", err) + } + ret.isTerminal = staticTrue + return ret, nil + } else if isatty.IsCygwinTerminal(fd) { + // As with the output handles above, we'll use isatty's heuristic to + // pretend that a pipe from mintty or a similar userspace terminal + // emulator is actually a terminal. 
+ ret.isTerminal = staticTrue + return ret, nil + } + + // If we fall out here then we have a non-terminal filehandle, so we'll + // just accept all of the default InputStream behaviors + return ret, nil +} + +func getColumnsWindowsConsole(f *os.File) int { + // We'll just unconditionally ask the given file for its console buffer + // info here, and let it fail if the file isn't actually a console. + // (In practice, the init functions above only hook up this function + // if the handle looks like a console, so this should succeed.) + var info windows.ConsoleScreenBufferInfo + err := windows.GetConsoleScreenBufferInfo(windows.Handle(f.Fd()), &info) + if err != nil { + return defaultColumns + } + return int(info.Size.X) +} + +// Unfortunately not all of the Windows kernel functions we need are in +// x/sys/windows at the time of writing, so we need to call some of them +// directly. (If you're maintaining this in future and have the capacity to +// test it well, consider checking if these functions have been added upstream +// yet and switch to their wrapper stubs if so.) +var modkernel32 = windows.NewLazySystemDLL("kernel32.dll") +var procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") +var procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") + +const CP_UTF8 = 65001 + +// (These are written in the style of the stubs in x/sys/windows, which is +// a little non-idiomatic just due to the awkwardness of the low-level syscall +// interface.) 
+ +func SetConsoleCP(codepageID uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(codepageID), 0, 0) + if r1 == 0 { + err = e1 + } + return +} + +func SetConsoleOutputCP(codepageID uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(codepageID), 0, 0) + if r1 == 0 { + err = e1 + } + return +} + +func staticTrue(f *os.File) bool { + return true +} + +func staticFalse(f *os.File) bool { + return false +} diff --git a/internal/terminal/panicwrap_ugh.go b/internal/terminal/panicwrap_ugh.go new file mode 100644 index 000000000..b17165b2c --- /dev/null +++ b/internal/terminal/panicwrap_ugh.go @@ -0,0 +1,78 @@ +package terminal + +import "os" + +// This file has some annoying nonsense to, yet again, work around the +// panicwrap hack. +// +// Specifically, typically when we're running Terraform the stderr handle is +// not directly connected to the terminal but is instead a pipe into a parent +// process gathering up the output just in case a panic message appears. +// However, this package needs to know whether the _real_ stderr is connected +// to a terminal and what its width is. +// +// To work around that, we'll first initialize the terminal in the parent +// process, and then capture information about stderr into an environment +// variable so we can pass it down to the child process. The child process +// will then use the environment variable to pretend that the panicwrap pipe +// has the same characteristics as the terminal that it's indirectly writing +// to. +// +// This file has some helpers for implementing that awkward handshake, but the +// handshake itself is in package main, interspersed with all of the other +// panicwrap machinery. 
+// +// You might think that the code in helper/wrappedstreams could avoid this +// problem, but that package is broken on Windows: it always fails to recover +// the real stderr, and it also gets an incorrect result if the user was +// redirecting or piping stdout/stdin. So... we have this hack instead, which +// gets a correct result even on Windows and even with I/O redirection. + +// StateForAfterPanicWrap is part of the workaround for panicwrap that +// captures some characteristics of stderr that the caller can pass to the +// panicwrap child process somehow and then use ReinitInsidePanicwrap. +func (s *Streams) StateForAfterPanicWrap() *PrePanicwrapState { + return &PrePanicwrapState{ + StderrIsTerminal: s.Stderr.IsTerminal(), + StderrWidth: s.Stderr.Columns(), + } +} + +// ReinitInsidePanicwrap is part of the workaround for panicwrap that +// produces a Streams containing a potentially-lying Stderr that might +// claim to be a terminal even if it's actually a pipe connected to the +// parent process. +// +// That's an okay lie in practice because the parent process will copy any +// data it receives via that pipe verbatim to the real stderr anyway. (The +// original call to Init in the parent process should've already done any +// necessary modesetting on the Stderr terminal, if any.) +// +// The state argument can be nil if we're not running in panicwrap mode, +// in which case this function behaves exactly the same as Init. +func ReinitInsidePanicwrap(state *PrePanicwrapState) (*Streams, error) { + ret, err := Init() + if err != nil { + return ret, err + } + if state != nil { + // A lying stderr, then. 
+ ret.Stderr = &OutputStream{ + File: ret.Stderr.File, + isTerminal: func(f *os.File) bool { + return state.StderrIsTerminal + }, + getColumns: func(f *os.File) int { + return state.StderrWidth + }, + } + } + return ret, nil +} + +// PrePanicwrapState is a horrible thing we use to work around panicwrap, +// related to both Streams.StateForAfterPanicWrap and ReinitInsidePanicwrap. +type PrePanicwrapState struct { + StderrIsTerminal bool + StderrWidth int +} diff --git a/internal/terminal/stream.go b/internal/terminal/stream.go new file mode 100644 index 000000000..6d40e1b18 --- /dev/null +++ b/internal/terminal/stream.go @@ -0,0 +1,80 @@ +package terminal + +import ( + "os" +) + +const defaultColumns int = 78 +const defaultIsTerminal bool = false + +// OutputStream represents an output stream that might or might not be connected +// to a terminal. +// +// There are typically two instances of this: one representing stdout and one +// representing stderr. +type OutputStream struct { + File *os.File + + // Interacting with a terminal is typically platform-specific, so we + // factor out these into virtual functions, although we have default + // behaviors suitable for non-Terminal output if any of these isn't + // set. (We're using function pointers rather than interfaces for this + // because it allows us to mix both normal methods and virtual methods + // on the same type, without a bunch of extra complexity.) + isTerminal func(*os.File) bool + getColumns func(*os.File) int +} + +// Columns returns a number of character cell columns that we expect will +// fill the width of the terminal that stdout is connected to, or a reasonable +// placeholder value of 78 if the output doesn't seem to be a terminal. +// +// This is a best-effort sort of function which may give an inaccurate result +// in various cases. 
For example, callers storing the result will not react +// to subsequent changes in the terminal width, and indeed this function itself +// may not be able to either, depending on the constraints of the current +// execution context. +func (s *OutputStream) Columns() int { + if s.getColumns == nil { + return defaultColumns + } + return s.getColumns(s.File) +} + +// IsTerminal returns true if we expect that the stream is connected to a +// terminal which supports VT100-style formatting and cursor control sequences. +func (s *OutputStream) IsTerminal() bool { + if s.isTerminal == nil { + return defaultIsTerminal + } + return s.isTerminal(s.File) +} + +// InputStream represents an input stream that might or might not be a terminal. +// +// There is typically only one instance of this type, representing stdin. +type InputStream struct { + File *os.File + + // Interacting with a terminal is typically platform-specific, so we + // factor out these into virtual functions, although we have default + // behaviors suitable for non-Terminal output if any of these isn't + // set. (We're using function pointers rather than interfaces for this + // because it allows us to mix both normal methods and virtual methods + // on the same type, without a bunch of extra complexity.) + isTerminal func(*os.File) bool +} + +// IsTerminal returns true if we expect that the stream is connected to a +// terminal which can support interactive input. +// +// If this returns false, callers might prefer to skip elaborate input prompt +// functionality like tab completion and instead just treat the input as a +// raw byte stream, or perhaps skip prompting for input at all depending on the +// situation. 
+func (s *InputStream) IsTerminal() bool { + if s.isTerminal == nil { + return defaultIsTerminal + } + return s.isTerminal(s.File) +} diff --git a/internal/terminal/streams.go b/internal/terminal/streams.go new file mode 100644 index 000000000..1e1de7d96 --- /dev/null +++ b/internal/terminal/streams.go @@ -0,0 +1,105 @@ +// Package terminal encapsulates some platform-specific logic for detecting +// if we're running in a terminal and, if so, properly configuring that +// terminal to meet the assumptions that the rest of Terraform makes. +// +// Specifically, Terraform requires a Terminal which supports virtual terminal +// sequences and which accepts UTF-8-encoded text. +// +// This is an abstraction only over the platform-specific detection of and +// possibly initialization of terminals. It's not intended to provide +// higher-level abstractions of the sort provided by packages like termcap or +// curses; ultimately we just assume that terminals are "standard" VT100-like +// terminals and use a subset of control codes that works across the various +// platforms we support. Our approximate target is "xterm-compatible" +// virtual terminals. +package terminal + +import ( + "fmt" + "os" +) + +// Streams represents a collection of three streams that each may or may not +// be connected to a terminal. +// +// If a stream is connected to a terminal then there are more possibilities +// available, such as detecting the current terminal width. If we're connected +// to something else, such as a pipe or a file on disk, the stream will +// typically provide placeholder values or do-nothing stubs for +// terminal-requiring operations. +// +// Note that it's possible for only a subset of the streams to be connected +// to a terminal. For example, this happens if the user runs Terraform with +// I/O redirection where Stdout might refer to a regular disk file while Stderr +// refers to a terminal, or various other similar combinations. 
+type Streams struct { + Stdout *OutputStream + Stderr *OutputStream + Stdin *InputStream +} + +// Init tries to initialize a terminal, if Terraform is running in one, and +// returns an object describing what it was able to set up. +// +// An error for this function indicates that the current execution context +// can't meet Terraform's assumptions. For example, on Windows Init will return +// an error if Terraform is running in a Windows Console that refuses to +// activate UTF-8 mode, which can happen if we're running on an unsupported old +// version of Windows. +// +// Note that the success of this function doesn't mean that we're actually +// running in a terminal. It could also represent successfully detecting that +// one or more of the input/output streams is not a terminal. +func Init() (*Streams, error) { + // These configure* functions are platform-specific functions in other + // files that use //+build constraints to vary based on target OS. + + stderr, err := configureOutputHandle(os.Stderr) + if err != nil { + return nil, err + } + stdout, err := configureOutputHandle(os.Stdout) + if err != nil { + return nil, err + } + stdin, err := configureInputHandle(os.Stdin) + if err != nil { + return nil, err + } + + return &Streams{ + Stdout: stdout, + Stderr: stderr, + Stdin: stdin, + }, nil +} + +// Print is a helper for conveniently calling fmt.Fprint on the Stdout stream. +func (s *Streams) Print(a ...interface{}) (n int, err error) { + return fmt.Fprint(s.Stdout.File, a...) +} + +// Printf is a helper for conveniently calling fmt.Fprintf on the Stdout stream. +func (s *Streams) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(s.Stdout.File, format, a...) +} + +// Println is a helper for conveniently calling fmt.Fprintln on the Stdout stream. +func (s *Streams) Println(a ...interface{}) (n int, err error) { + return fmt.Fprintln(s.Stdout.File, a...) 
+} + +// Eprint is a helper for conveniently calling fmt.Fprint on the Stderr stream. +func (s *Streams) Eprint(a ...interface{}) (n int, err error) { + return fmt.Fprint(s.Stderr.File, a...) +} + +// Eprintf is a helper for conveniently calling fmt.Fprintf on the Stderr stream. +func (s *Streams) Eprintf(format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(s.Stderr.File, format, a...) +} + +// Eprintln is a helper for conveniently calling fmt.Fprintln on the Stderr stream. +func (s *Streams) Eprintln(a ...interface{}) (n int, err error) { + return fmt.Fprintln(s.Stderr.File, a...) +} diff --git a/internal/terminal/streams_test.go b/internal/terminal/streams_test.go new file mode 100644 index 000000000..9826b9341 --- /dev/null +++ b/internal/terminal/streams_test.go @@ -0,0 +1,38 @@ +package terminal + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestStreamsFmtHelpers(t *testing.T) { + streams, close := StreamsForTesting(t) + + streams.Print("stdout print ", 5, "\n") + streams.Eprint("stderr print ", 6, "\n") + streams.Println("stdout println", 7) + streams.Eprintln("stderr println", 8) + streams.Printf("stdout printf %d\n", 9) + streams.Eprintf("stderr printf %d\n", 10) + + outp := close(t) + + gotOut := outp.Stdout() + wantOut := `stdout print 5 +stdout println 7 +stdout printf 9 +` + if diff := cmp.Diff(wantOut, gotOut); diff != "" { + t.Errorf("wrong stdout\n%s", diff) + } + + gotErr := outp.Stderr() + wantErr := `stderr print 6 +stderr println 8 +stderr printf 10 +` + if diff := cmp.Diff(wantErr, gotErr); diff != "" { + t.Errorf("wrong stderr\n%s", diff) + } +} diff --git a/internal/terminal/testing.go b/internal/terminal/testing.go new file mode 100644 index 000000000..2830b5d0b --- /dev/null +++ b/internal/terminal/testing.go @@ -0,0 +1,191 @@ +package terminal + +import ( + "fmt" + "io" + "os" + "strings" + "sync" + "testing" +) + +// StreamsForTesting is a helper for test code that is aiming to test functions 
+// that interact with the input and output streams. +// +// This particular function is for the simple case of a function that only +// produces output: the returned input stream is connected to the system's +// "null device", as if a user had run Terraform with I/O redirection like +// tfplugin5.Diagnostic.Severity + 5, // 1: tfplugin5.Diagnostic.attribute:type_name -> tfplugin5.AttributePath + 23, // 2: tfplugin5.AttributePath.steps:type_name -> tfplugin5.AttributePath.Step + 26, // 3: tfplugin5.RawState.flatmap:type_name -> tfplugin5.RawState.FlatmapEntry + 27, // 4: tfplugin5.Schema.block:type_name -> tfplugin5.Schema.Block + 28, // 5: tfplugin5.Schema.Block.attributes:type_name -> tfplugin5.Schema.Attribute + 29, // 6: tfplugin5.Schema.Block.block_types:type_name -> tfplugin5.Schema.NestedBlock + 0, // 7: tfplugin5.Schema.Block.description_kind:type_name -> tfplugin5.StringKind + 0, // 8: tfplugin5.Schema.Attribute.description_kind:type_name -> tfplugin5.StringKind + 27, // 9: tfplugin5.Schema.NestedBlock.block:type_name -> tfplugin5.Schema.Block + 2, // 10: tfplugin5.Schema.NestedBlock.nesting:type_name -> tfplugin5.Schema.NestedBlock.NestingMode + 8, // 11: tfplugin5.GetProviderSchema.Response.provider:type_name -> tfplugin5.Schema + 32, // 12: tfplugin5.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry + 33, // 13: tfplugin5.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry + 4, // 14: tfplugin5.GetProviderSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 8, // 15: tfplugin5.GetProviderSchema.Response.provider_meta:type_name -> tfplugin5.Schema + 8, // 16: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin5.Schema + 8, // 17: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin5.Schema + 3, // 18: 
tfplugin5.PrepareProviderConfig.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 19: tfplugin5.PrepareProviderConfig.Response.prepared_config:type_name -> tfplugin5.DynamicValue + 4, // 20: tfplugin5.PrepareProviderConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 7, // 21: tfplugin5.UpgradeResourceState.Request.raw_state:type_name -> tfplugin5.RawState + 3, // 22: tfplugin5.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin5.DynamicValue + 4, // 23: tfplugin5.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 24: tfplugin5.ValidateResourceTypeConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 25: tfplugin5.ValidateResourceTypeConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 26: tfplugin5.ValidateDataSourceConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 27: tfplugin5.ValidateDataSourceConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 28: tfplugin5.Configure.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 29: tfplugin5.Configure.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 30: tfplugin5.ReadResource.Request.current_state:type_name -> tfplugin5.DynamicValue + 3, // 31: tfplugin5.ReadResource.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 32: tfplugin5.ReadResource.Response.new_state:type_name -> tfplugin5.DynamicValue + 4, // 33: tfplugin5.ReadResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 34: tfplugin5.PlanResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue + 3, // 35: tfplugin5.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin5.DynamicValue + 3, // 36: tfplugin5.PlanResourceChange.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 37: tfplugin5.PlanResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 38: tfplugin5.PlanResourceChange.Response.planned_state:type_name -> 
tfplugin5.DynamicValue + 5, // 39: tfplugin5.PlanResourceChange.Response.requires_replace:type_name -> tfplugin5.AttributePath + 4, // 40: tfplugin5.PlanResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 41: tfplugin5.ApplyResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue + 3, // 42: tfplugin5.ApplyResourceChange.Request.planned_state:type_name -> tfplugin5.DynamicValue + 3, // 43: tfplugin5.ApplyResourceChange.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 44: tfplugin5.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 45: tfplugin5.ApplyResourceChange.Response.new_state:type_name -> tfplugin5.DynamicValue + 4, // 46: tfplugin5.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 47: tfplugin5.ImportResourceState.ImportedResource.state:type_name -> tfplugin5.DynamicValue + 51, // 48: tfplugin5.ImportResourceState.Response.imported_resources:type_name -> tfplugin5.ImportResourceState.ImportedResource + 4, // 49: tfplugin5.ImportResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 50: tfplugin5.ReadDataSource.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 51: tfplugin5.ReadDataSource.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 52: tfplugin5.ReadDataSource.Response.state:type_name -> tfplugin5.DynamicValue + 4, // 53: tfplugin5.ReadDataSource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 8, // 54: tfplugin5.GetProvisionerSchema.Response.provisioner:type_name -> tfplugin5.Schema + 4, // 55: tfplugin5.GetProvisionerSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 56: tfplugin5.ValidateProvisionerConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 57: tfplugin5.ValidateProvisionerConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 58: tfplugin5.ProvisionResource.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 59: 
tfplugin5.ProvisionResource.Request.connection:type_name -> tfplugin5.DynamicValue + 4, // 60: tfplugin5.ProvisionResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 30, // 61: tfplugin5.Provider.GetSchema:input_type -> tfplugin5.GetProviderSchema.Request + 34, // 62: tfplugin5.Provider.PrepareProviderConfig:input_type -> tfplugin5.PrepareProviderConfig.Request + 38, // 63: tfplugin5.Provider.ValidateResourceTypeConfig:input_type -> tfplugin5.ValidateResourceTypeConfig.Request + 40, // 64: tfplugin5.Provider.ValidateDataSourceConfig:input_type -> tfplugin5.ValidateDataSourceConfig.Request + 36, // 65: tfplugin5.Provider.UpgradeResourceState:input_type -> tfplugin5.UpgradeResourceState.Request + 42, // 66: tfplugin5.Provider.Configure:input_type -> tfplugin5.Configure.Request + 44, // 67: tfplugin5.Provider.ReadResource:input_type -> tfplugin5.ReadResource.Request + 46, // 68: tfplugin5.Provider.PlanResourceChange:input_type -> tfplugin5.PlanResourceChange.Request + 48, // 69: tfplugin5.Provider.ApplyResourceChange:input_type -> tfplugin5.ApplyResourceChange.Request + 50, // 70: tfplugin5.Provider.ImportResourceState:input_type -> tfplugin5.ImportResourceState.Request + 53, // 71: tfplugin5.Provider.ReadDataSource:input_type -> tfplugin5.ReadDataSource.Request + 24, // 72: tfplugin5.Provider.Stop:input_type -> tfplugin5.Stop.Request + 55, // 73: tfplugin5.Provisioner.GetSchema:input_type -> tfplugin5.GetProvisionerSchema.Request + 57, // 74: tfplugin5.Provisioner.ValidateProvisionerConfig:input_type -> tfplugin5.ValidateProvisionerConfig.Request + 59, // 75: tfplugin5.Provisioner.ProvisionResource:input_type -> tfplugin5.ProvisionResource.Request + 24, // 76: tfplugin5.Provisioner.Stop:input_type -> tfplugin5.Stop.Request + 31, // 77: tfplugin5.Provider.GetSchema:output_type -> tfplugin5.GetProviderSchema.Response + 35, // 78: tfplugin5.Provider.PrepareProviderConfig:output_type -> tfplugin5.PrepareProviderConfig.Response + 39, // 79: 
tfplugin5.Provider.ValidateResourceTypeConfig:output_type -> tfplugin5.ValidateResourceTypeConfig.Response + 41, // 80: tfplugin5.Provider.ValidateDataSourceConfig:output_type -> tfplugin5.ValidateDataSourceConfig.Response + 37, // 81: tfplugin5.Provider.UpgradeResourceState:output_type -> tfplugin5.UpgradeResourceState.Response + 43, // 82: tfplugin5.Provider.Configure:output_type -> tfplugin5.Configure.Response + 45, // 83: tfplugin5.Provider.ReadResource:output_type -> tfplugin5.ReadResource.Response + 47, // 84: tfplugin5.Provider.PlanResourceChange:output_type -> tfplugin5.PlanResourceChange.Response + 49, // 85: tfplugin5.Provider.ApplyResourceChange:output_type -> tfplugin5.ApplyResourceChange.Response + 52, // 86: tfplugin5.Provider.ImportResourceState:output_type -> tfplugin5.ImportResourceState.Response + 54, // 87: tfplugin5.Provider.ReadDataSource:output_type -> tfplugin5.ReadDataSource.Response + 25, // 88: tfplugin5.Provider.Stop:output_type -> tfplugin5.Stop.Response + 56, // 89: tfplugin5.Provisioner.GetSchema:output_type -> tfplugin5.GetProvisionerSchema.Response + 58, // 90: tfplugin5.Provisioner.ValidateProvisionerConfig:output_type -> tfplugin5.ValidateProvisionerConfig.Response + 60, // 91: tfplugin5.Provisioner.ProvisionResource:output_type -> tfplugin5.ProvisionResource.Response + 25, // 92: tfplugin5.Provisioner.Stop:output_type -> tfplugin5.Stop.Response + 77, // [77:93] is the sub-list for method output_type + 61, // [61:77] is the sub-list for method input_type + 61, // [61:61] is the sub-list for extension type_name + 61, // [61:61] is the sub-list for extension extendee + 0, // [0:61] is the sub-list for field type_name +} + +func init() { file_tfplugin5_proto_init() } +func file_tfplugin5_proto_init() { + if File_tfplugin5_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_tfplugin5_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicValue); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Diagnostic); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttributePath); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stop); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RawState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrepareProviderConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState); i { + case 0: 
+ return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceTypeConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataSourceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Configure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[16].Exporter = func(v interface{}, i 
int) interface{} { + switch v := v.(*ReadDataSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProvisionerSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateProvisionerConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProvisionResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttributePath_Step); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stop_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stop_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_Block); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_tfplugin5_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_Attribute); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_NestedBlock); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProviderSchema_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrepareProviderConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrepareProviderConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpgradeResourceState_Response); i { 
+ case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceTypeConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResourceTypeConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataSourceConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateDataSourceConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Configure_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Configure_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } 
+ } + file_tfplugin5_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadResource_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlanResourceChange_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyResourceChange_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImportResourceState_ImportedResource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*ImportResourceState_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadDataSource_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProvisionerSchema_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetProvisionerSchema_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateProvisionerConfig_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateProvisionerConfig_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProvisionResource_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProvisionResource_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_tfplugin5_proto_msgTypes[20].OneofWrappers = []interface{}{ + (*AttributePath_Step_AttributeName)(nil), + (*AttributePath_Step_ElementKeyString)(nil), + (*AttributePath_Step_ElementKeyInt)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_tfplugin5_proto_rawDesc, + NumEnums: 3, + NumMessages: 58, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_tfplugin5_proto_goTypes, + DependencyIndexes: file_tfplugin5_proto_depIdxs, + EnumInfos: file_tfplugin5_proto_enumTypes, + MessageInfos: file_tfplugin5_proto_msgTypes, + }.Build() + File_tfplugin5_proto = out.File + file_tfplugin5_proto_rawDesc = nil + file_tfplugin5_proto_goTypes = nil + file_tfplugin5_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +const _ = grpc.SupportPackageIsVersion6 // ProviderClient is the client API for Provider service. 
// @@ -2970,10 +4640,10 @@ type ProviderClient interface { } type providerClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewProviderClient(cc *grpc.ClientConn) ProviderClient { +func NewProviderClient(cc grpc.ClientConnInterface) ProviderClient { return &providerClient{cc} } @@ -3109,40 +4779,40 @@ type ProviderServer interface { type UnimplementedProviderServer struct { } -func (*UnimplementedProviderServer) GetSchema(ctx context.Context, req *GetProviderSchema_Request) (*GetProviderSchema_Response, error) { +func (*UnimplementedProviderServer) GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") } -func (*UnimplementedProviderServer) PrepareProviderConfig(ctx context.Context, req *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) { +func (*UnimplementedProviderServer) PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method PrepareProviderConfig not implemented") } -func (*UnimplementedProviderServer) ValidateResourceTypeConfig(ctx context.Context, req *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) { +func (*UnimplementedProviderServer) ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method ValidateResourceTypeConfig not implemented") } -func (*UnimplementedProviderServer) ValidateDataSourceConfig(ctx context.Context, req *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) { +func (*UnimplementedProviderServer) ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method 
ValidateDataSourceConfig not implemented") } -func (*UnimplementedProviderServer) UpgradeResourceState(ctx context.Context, req *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) { +func (*UnimplementedProviderServer) UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method UpgradeResourceState not implemented") } -func (*UnimplementedProviderServer) Configure(ctx context.Context, req *Configure_Request) (*Configure_Response, error) { +func (*UnimplementedProviderServer) Configure(context.Context, *Configure_Request) (*Configure_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") } -func (*UnimplementedProviderServer) ReadResource(ctx context.Context, req *ReadResource_Request) (*ReadResource_Response, error) { +func (*UnimplementedProviderServer) ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method ReadResource not implemented") } -func (*UnimplementedProviderServer) PlanResourceChange(ctx context.Context, req *PlanResourceChange_Request) (*PlanResourceChange_Response, error) { +func (*UnimplementedProviderServer) PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method PlanResourceChange not implemented") } -func (*UnimplementedProviderServer) ApplyResourceChange(ctx context.Context, req *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) { +func (*UnimplementedProviderServer) ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method ApplyResourceChange not implemented") } -func (*UnimplementedProviderServer) ImportResourceState(ctx context.Context, req 
*ImportResourceState_Request) (*ImportResourceState_Response, error) { +func (*UnimplementedProviderServer) ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method ImportResourceState not implemented") } -func (*UnimplementedProviderServer) ReadDataSource(ctx context.Context, req *ReadDataSource_Request) (*ReadDataSource_Response, error) { +func (*UnimplementedProviderServer) ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method ReadDataSource not implemented") } -func (*UnimplementedProviderServer) Stop(ctx context.Context, req *Stop_Request) (*Stop_Response, error) { +func (*UnimplementedProviderServer) Stop(context.Context, *Stop_Request) (*Stop_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") } @@ -3434,10 +5104,10 @@ type ProvisionerClient interface { } type provisionerClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewProvisionerClient(cc *grpc.ClientConn) ProvisionerClient { +func NewProvisionerClient(cc grpc.ClientConnInterface) ProvisionerClient { return &provisionerClient{cc} } @@ -3512,16 +5182,16 @@ type ProvisionerServer interface { type UnimplementedProvisionerServer struct { } -func (*UnimplementedProvisionerServer) GetSchema(ctx context.Context, req *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) { +func (*UnimplementedProvisionerServer) GetSchema(context.Context, *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") } -func (*UnimplementedProvisionerServer) ValidateProvisionerConfig(ctx context.Context, req *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) { +func (*UnimplementedProvisionerServer) 
ValidateProvisionerConfig(context.Context, *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method ValidateProvisionerConfig not implemented") } -func (*UnimplementedProvisionerServer) ProvisionResource(req *ProvisionResource_Request, srv Provisioner_ProvisionResourceServer) error { +func (*UnimplementedProvisionerServer) ProvisionResource(*ProvisionResource_Request, Provisioner_ProvisionResourceServer) error { return status.Errorf(codes.Unimplemented, "method ProvisionResource not implemented") } -func (*UnimplementedProvisionerServer) Stop(ctx context.Context, req *Stop_Request) (*Stop_Response, error) { +func (*UnimplementedProvisionerServer) Stop(context.Context, *Stop_Request) (*Stop_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") } diff --git a/internal/typeexpr/get_type.go b/internal/typeexpr/get_type.go index da84f5dcc..de5465b99 100644 --- a/internal/typeexpr/get_type.go +++ b/internal/typeexpr/get_type.go @@ -167,7 +167,7 @@ func getType(expr hcl.Expression, constraint bool) (cty.Type, hcl.Diagnostics) { // modifier optional(...) to indicate an optional attribute. If // so, we'll unwrap that first and make a note about it being // optional for when we construct the type below. - if call, diags := hcl.ExprCall(atyExpr); !diags.HasErrors() { + if call, callDiags := hcl.ExprCall(atyExpr); !callDiags.HasErrors() { if call.Name == "optional" { if len(call.Arguments) < 1 { diags = append(diags, &hcl.Diagnostic{ diff --git a/lang/eval.go b/lang/eval.go index 381ec4288..fab3c933d 100644 --- a/lang/eval.go +++ b/lang/eval.go @@ -72,8 +72,13 @@ func (s *Scope) EvalBlock(body hcl.Body, schema *configschema.Block) (cty.Value, // EvalSelfBlock evaluates the given body only within the scope of the provided // object and instance key data. References to the object must use self, and the -// key data will only contain count.index or each.key. 
+// key data will only contain count.index or each.key. The static values for +// terraform and path will also be available in this context. func (s *Scope) EvalSelfBlock(body hcl.Body, self cty.Value, schema *configschema.Block, keyData instances.RepetitionData) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + spec := schema.DecoderSpec() + vals := make(map[string]cty.Value) vals["self"] = self @@ -88,12 +93,55 @@ func (s *Scope) EvalSelfBlock(body hcl.Body, self cty.Value, schema *configschem }) } + refs, refDiags := References(hcldec.Variables(body, spec)) + diags = diags.Append(refDiags) + + terraformAttrs := map[string]cty.Value{} + pathAttrs := map[string]cty.Value{} + + // We could always load the static values for Path and Terraform values, + // but we want to parse the references so that we can get source ranges for + // user diagnostics. + for _, ref := range refs { + // we already loaded the self value + if ref.Subject == addrs.Self { + continue + } + + switch subj := ref.Subject.(type) { + case addrs.PathAttr: + val, valDiags := normalizeRefValue(s.Data.GetPathAttr(subj, ref.SourceRange)) + diags = diags.Append(valDiags) + pathAttrs[subj.Name] = val + + case addrs.TerraformAttr: + val, valDiags := normalizeRefValue(s.Data.GetTerraformAttr(subj, ref.SourceRange)) + diags = diags.Append(valDiags) + terraformAttrs[subj.Name] = val + + case addrs.CountAttr, addrs.ForEachAttr: + // each and count have already been handled. + + default: + // This should have been caught in validation, but point the user + // to the correct location in case something slipped through. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid reference`, + Detail: fmt.Sprintf("The reference to %q is not valid in this context", ref.Subject), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + } + + vals["path"] = cty.ObjectVal(pathAttrs) + vals["terraform"] = cty.ObjectVal(terraformAttrs) + ctx := &hcl.EvalContext{ Variables: vals, Functions: s.Functions(), } - var diags tfdiags.Diagnostics val, decDiags := hcldec.Decode(body, schema.DecoderSpec(), ctx) diags = diags.Append(decDiags) return val, diags @@ -129,10 +177,12 @@ func (s *Scope) EvalExpr(expr hcl.Expression, wantType cty.Type) (cty.Value, tfd if convErr != nil { val = cty.UnknownVal(wantType) diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Incorrect value type", - Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), - Subject: expr.Range().Ptr(), + Severity: hcl.DiagError, + Summary: "Incorrect value type", + Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: ctx, }) } } @@ -273,10 +323,10 @@ func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceabl switch k := subj.Key.(type) { case addrs.IntKey: self, hclDiags = hcl.Index(val, cty.NumberIntVal(int64(k)), ref.SourceRange.ToHCL().Ptr()) - diags.Append(hclDiags) + diags = diags.Append(hclDiags) case addrs.StringKey: self, hclDiags = hcl.Index(val, cty.StringVal(string(k)), ref.SourceRange.ToHCL().Ptr()) - diags.Append(hclDiags) + diags = diags.Append(hclDiags) default: self = val } diff --git a/lang/eval_test.go b/lang/eval_test.go index f9ee894d6..0ddcef193 100644 --- a/lang/eval_test.go +++ b/lang/eval_test.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/instances" "github.com/hashicorp/hcl/v2" 
"github.com/hashicorp/hcl/v2/hclsyntax" @@ -642,3 +643,128 @@ func formattedJSONValue(val cty.Value) string { json.Indent(&buf, j, "", " ") return buf.String() } + +func TestScopeEvalSelfBlock(t *testing.T) { + data := &dataForTests{ + PathAttrs: map[string]cty.Value{ + "module": cty.StringVal("foo/bar"), + "cwd": cty.StringVal("/home/foo/bar"), + "root": cty.StringVal("/home/foo"), + }, + TerraformAttrs: map[string]cty.Value{ + "workspace": cty.StringVal("default"), + }, + } + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "attr": { + Type: cty.String, + }, + "num": { + Type: cty.Number, + }, + }, + } + + tests := []struct { + Config string + Self cty.Value + KeyData instances.RepetitionData + Want map[string]cty.Value + }{ + { + Config: `attr = self.foo`, + Self: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + KeyData: instances.RepetitionData{ + CountIndex: cty.NumberIntVal(0), + }, + Want: map[string]cty.Value{ + "attr": cty.StringVal("bar"), + "num": cty.NullVal(cty.Number), + }, + }, + { + Config: `num = count.index`, + KeyData: instances.RepetitionData{ + CountIndex: cty.NumberIntVal(0), + }, + Want: map[string]cty.Value{ + "attr": cty.NullVal(cty.String), + "num": cty.NumberIntVal(0), + }, + }, + { + Config: `attr = each.key`, + KeyData: instances.RepetitionData{ + EachKey: cty.StringVal("a"), + }, + Want: map[string]cty.Value{ + "attr": cty.StringVal("a"), + "num": cty.NullVal(cty.Number), + }, + }, + { + Config: `attr = path.cwd`, + Want: map[string]cty.Value{ + "attr": cty.StringVal("/home/foo/bar"), + "num": cty.NullVal(cty.Number), + }, + }, + { + Config: `attr = path.module`, + Want: map[string]cty.Value{ + "attr": cty.StringVal("foo/bar"), + "num": cty.NullVal(cty.Number), + }, + }, + { + Config: `attr = path.root`, + Want: map[string]cty.Value{ + "attr": cty.StringVal("/home/foo"), + "num": cty.NullVal(cty.Number), + }, + }, + { + Config: `attr = terraform.workspace`, + Want: 
map[string]cty.Value{ + "attr": cty.StringVal("default"), + "num": cty.NullVal(cty.Number), + }, + }, + } + + for _, test := range tests { + t.Run(test.Config, func(t *testing.T) { + file, parseDiags := hclsyntax.ParseConfig([]byte(test.Config), "", hcl.Pos{Line: 1, Column: 1}) + if len(parseDiags) != 0 { + t.Errorf("unexpected diagnostics during parse") + for _, diag := range parseDiags { + t.Errorf("- %s", diag) + } + return + } + + body := file.Body + + scope := &Scope{ + Data: data, + } + + gotVal, ctxDiags := scope.EvalSelfBlock(body, test.Self, schema, test.KeyData) + if ctxDiags.HasErrors() { + t.Fatal(ctxDiags.Err()) + } + + wantVal := cty.ObjectVal(test.Want) + + if !gotVal.RawEquals(wantVal) { + t.Errorf( + "wrong result\nexpr: %s\ngot: %#v\nwant: %#v", + test.Config, gotVal, wantVal, + ) + } + }) + } +} diff --git a/lang/funcs/collection.go b/lang/funcs/collection.go index cca7423c0..33073d747 100644 --- a/lang/funcs/collection.go +++ b/lang/funcs/collection.go @@ -3,13 +3,13 @@ package funcs import ( "errors" "fmt" + "math/big" "sort" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/convert" "github.com/zclconf/go-cty/cty/function" "github.com/zclconf/go-cty/cty/function/stdlib" - "github.com/zclconf/go-cty/cty/gocty" ) var LengthFunc = function.New(&function.Spec{ @@ -70,6 +70,9 @@ var AllTrueFunc = function.New(&function.Spec{ result := cty.True for it := args[0].ElementIterator(); it.Next(); { _, v := it.Element() + if !v.IsKnown() { + return cty.UnknownVal(cty.Bool), nil + } if v.IsNull() { return cty.False, nil } @@ -94,8 +97,13 @@ var AnyTrueFunc = function.New(&function.Spec{ Type: function.StaticReturnType(cty.Bool), Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { result := cty.False + var hasUnknown bool for it := args[0].ElementIterator(); it.Next(); { _, v := it.Element() + if !v.IsKnown() { + hasUnknown = true + continue + } if v.IsNull() { continue } @@ -104,6 +112,9 @@ var AnyTrueFunc = 
function.New(&function.Spec{ return cty.True, nil } } + if hasUnknown { + return cty.UnknownVal(cty.Bool), nil + } return result, nil }, }) @@ -196,74 +207,6 @@ var IndexFunc = function.New(&function.Spec{ }, }) -// Flatten until it's not a cty.List, and return whether the value is known. -// We can flatten lists with unknown values, as long as they are not -// lists themselves. -func flattener(flattenList cty.Value) ([]cty.Value, bool) { - out := make([]cty.Value, 0) - for it := flattenList.ElementIterator(); it.Next(); { - _, val := it.Element() - if val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType() { - if !val.IsKnown() { - return out, false - } - - res, known := flattener(val) - if !known { - return res, known - } - out = append(out, res...) - } else { - out = append(out, val) - } - } - return out, true -} - -// ListFunc constructs a function that takes an arbitrary number of arguments -// and returns a list containing those values in the same order. -// -// This function is deprecated in Terraform v0.12 -var ListFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) == 0 { - return cty.NilType, errors.New("at least one argument is required") - } - - argTypes := make([]cty.Type, len(args)) - - for i, arg := range args { - argTypes[i] = arg.Type() - } - - retType, _ := convert.UnifyUnsafe(argTypes) - if retType == cty.NilType { - return cty.NilType, errors.New("all arguments must have the same type") - } - - return cty.List(retType), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - newList := make([]cty.Value, 0, len(args)) - - for _, arg := range args { - // We already know this will succeed because of the checks in our Type func above - arg, _ = 
convert.Convert(arg, retType.ElementType()) - newList = append(newList, arg) - } - - return cty.ListVal(newList), nil - }, -}) - // LookupFunc constructs a function that performs dynamic lookups of map types. var LookupFunc = function.New(&function.Spec{ Params: []function.Parameter{ @@ -354,81 +297,6 @@ var LookupFunc = function.New(&function.Spec{ }, }) -// MapFunc constructs a function that takes an even number of arguments and -// returns a map whose elements are constructed from consecutive pairs of arguments. -// -// This function is deprecated in Terraform v0.12 -var MapFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - VarParam: &function.Parameter{ - Name: "vals", - Type: cty.DynamicPseudoType, - AllowUnknown: true, - AllowDynamicType: true, - AllowNull: true, - }, - Type: func(args []cty.Value) (ret cty.Type, err error) { - if len(args) < 2 || len(args)%2 != 0 { - return cty.NilType, fmt.Errorf("map requires an even number of two or more arguments, got %d", len(args)) - } - - argTypes := make([]cty.Type, len(args)/2) - index := 0 - - for i := 0; i < len(args); i += 2 { - argTypes[index] = args[i+1].Type() - index++ - } - - valType, _ := convert.UnifyUnsafe(argTypes) - if valType == cty.NilType { - return cty.NilType, errors.New("all arguments must have the same type") - } - - return cty.Map(valType), nil - }, - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - for _, arg := range args { - if !arg.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - } - - outputMap := make(map[string]cty.Value) - - for i := 0; i < len(args); i += 2 { - - keyVal, err := convert.Convert(args[i], cty.String) - if err != nil { - return cty.NilVal, err - } - if keyVal.IsNull() { - return cty.NilVal, fmt.Errorf("argument %d is a null key", i+1) - } - key := keyVal.AsString() - - val := args[i+1] - - var variable cty.Value - err = gocty.FromCtyValue(val, &variable) - if err != nil { - return cty.NilVal, err - } - - // We 
already know this will succeed because of the checks in our Type func above - variable, _ = convert.Convert(variable, retType.ElementType()) - - // Check for duplicate keys - if _, ok := outputMap[key]; ok { - return cty.NilVal, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key) - } - outputMap[key] = variable - } - - return cty.MapVal(outputMap), nil - }, -}) - // MatchkeysFunc constructs a function that constructs a new list by taking a // subset of elements from one list whose indexes match the corresponding // indexes of values in another list. @@ -536,27 +404,45 @@ var SumFunc = function.New(&function.Spec{ arg := args[0].AsValueSlice() ty := args[0].Type() - var i float64 - var s float64 - if !ty.IsListType() && !ty.IsSetType() && !ty.IsTupleType() { return cty.NilVal, function.NewArgErrorf(0, fmt.Sprintf("argument must be list, set, or tuple. Received %s", ty.FriendlyName())) } - if !args[0].IsKnown() { + if !args[0].IsWhollyKnown() { return cty.UnknownVal(cty.Number), nil } - for _, v := range arg { - - if err := gocty.FromCtyValue(v, &i); err != nil { - return cty.UnknownVal(cty.Number), function.NewArgErrorf(0, "argument must be list, set, or tuple of number values") - } else { - s += i + // big.Float.Add can panic if the input values are opposing infinities, + // so we must catch that here in order to remain within + // the cty Function abstraction. 
+ defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't compute sum of opposing infinities") + } else { + // not a panic we recognize + panic(r) + } } + }() + + s := arg[0] + if s.IsNull() { + return cty.NilVal, function.NewArgErrorf(0, "argument must be list, set, or tuple of number values") + } + for _, v := range arg[1:] { + if v.IsNull() { + return cty.NilVal, function.NewArgErrorf(0, "argument must be list, set, or tuple of number values") + } + v, err = convert.Convert(v, cty.Number) + if err != nil { + return cty.NilVal, function.NewArgErrorf(0, "argument must be list, set, or tuple of number values") + } + s = s.Add(v) } - return cty.NumberFloatVal(s), nil + return s, nil }, }) @@ -614,19 +500,47 @@ var TransposeFunc = function.New(&function.Spec{ }, }) -// helper function to add an element to a list, if it does not already exist -func appendIfMissing(slice []cty.Value, element cty.Value) ([]cty.Value, error) { - for _, ele := range slice { - eq, err := stdlib.Equal(ele, element) - if err != nil { - return slice, err - } - if eq.True() { - return slice, nil - } - } - return append(slice, element), nil -} +// ListFunc constructs a function that takes an arbitrary number of arguments +// and returns a list containing those values in the same order. +// +// This function is deprecated in Terraform v0.12 +var ListFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + return cty.DynamicPseudoType, fmt.Errorf("the \"list\" function was deprecated in Terraform v0.12 and is no longer available; use tolist([ ... 
]) syntax to write a literal list") + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return cty.DynamicVal, fmt.Errorf("the \"list\" function was deprecated in Terraform v0.12 and is no longer available; use tolist([ ... ]) syntax to write a literal list") + }, +}) + +// MapFunc constructs a function that takes an even number of arguments and +// returns a map whose elements are constructed from consecutive pairs of arguments. +// +// This function is deprecated in Terraform v0.12 +var MapFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + return cty.DynamicPseudoType, fmt.Errorf("the \"map\" function was deprecated in Terraform v0.12 and is no longer available; use tomap({ ... }) syntax to write a literal map") + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return cty.DynamicVal, fmt.Errorf("the \"map\" function was deprecated in Terraform v0.12 and is no longer available; use tomap({ ... }) syntax to write a literal map") + }, +}) // Length returns the number of elements in the given collection or number of // Unicode characters in the given string. 
diff --git a/lang/funcs/collection_test.go b/lang/funcs/collection_test.go index f74b3aafc..0b61738ac 100644 --- a/lang/funcs/collection_test.go +++ b/lang/funcs/collection_test.go @@ -2,10 +2,10 @@ package funcs import ( "fmt" + "math" "testing" "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/function" ) func TestLength(t *testing.T) { @@ -170,10 +170,28 @@ func TestAllTrue(t *testing.T) { cty.False, false, }, + { + cty.ListVal([]cty.Value{cty.True, cty.NullVal(cty.Bool)}), + cty.False, + false, + }, { cty.ListVal([]cty.Value{cty.UnknownVal(cty.Bool)}), cty.UnknownVal(cty.Bool), - true, + false, + }, + { + cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.Bool), + cty.UnknownVal(cty.Bool), + }), + cty.UnknownVal(cty.Bool), + false, + }, + { + cty.UnknownVal(cty.List(cty.Bool)), + cty.UnknownVal(cty.Bool), + false, }, { cty.NullVal(cty.List(cty.Bool)), @@ -233,10 +251,36 @@ func TestAnyTrue(t *testing.T) { cty.True, false, }, + { + cty.ListVal([]cty.Value{cty.NullVal(cty.Bool), cty.True}), + cty.True, + false, + }, { cty.ListVal([]cty.Value{cty.UnknownVal(cty.Bool)}), cty.UnknownVal(cty.Bool), - true, + false, + }, + { + cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.Bool), + cty.False, + }), + cty.UnknownVal(cty.Bool), + false, + }, + { + cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.Bool), + cty.True, + }), + cty.True, + false, + }, + { + cty.UnknownVal(cty.List(cty.Bool)), + cty.UnknownVal(cty.Bool), + false, }, { cty.NullVal(cty.List(cty.Bool)), @@ -472,83 +516,6 @@ func TestIndex(t *testing.T) { } } -func TestList(t *testing.T) { - tests := []struct { - Values []cty.Value - Want cty.Value - Err bool - }{ - { - []cty.Value{ - cty.NilVal, - }, - cty.NilVal, - true, - }, - { - []cty.Value{ - cty.StringVal("Hello"), - }, - cty.ListVal([]cty.Value{ - cty.StringVal("Hello"), - }), - false, - }, - { - []cty.Value{ - cty.StringVal("Hello"), - cty.StringVal("World"), - }, - cty.ListVal([]cty.Value{ - cty.StringVal("Hello"), - cty.StringVal("World"), - }), - 
false, - }, - { - []cty.Value{ - cty.StringVal("Hello"), - cty.NumberIntVal(42), - }, - cty.ListVal([]cty.Value{ - cty.StringVal("Hello"), - cty.StringVal("42"), - }), - false, - }, - { - []cty.Value{ - cty.StringVal("Hello"), - cty.UnknownVal(cty.String), - }, - cty.ListVal([]cty.Value{ - cty.StringVal("Hello"), - cty.UnknownVal(cty.String), - }), - false, - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("list(%#v)", test.Values), func(t *testing.T) { - got, err := List(test.Values...) - - if test.Err { - if err == nil { - t.Fatal("succeeded; want error") - } - return - } else if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if !got.RawEquals(test.Want) { - t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) - } - }) - } -} - func TestLookup(t *testing.T) { simpleMap := cty.MapVal(map[string]cty.Value{ "foo": cty.StringVal("bar"), @@ -802,169 +769,6 @@ func TestLookup(t *testing.T) { } } -func TestMap(t *testing.T) { - tests := []struct { - Values []cty.Value - Want cty.Value - Err bool - }{ - { - []cty.Value{ - cty.StringVal("hello"), - cty.StringVal("world"), - }, - cty.MapVal(map[string]cty.Value{ - "hello": cty.StringVal("world"), - }), - false, - }, - { - []cty.Value{ - cty.StringVal("hello"), - cty.UnknownVal(cty.String), - }, - cty.UnknownVal(cty.Map(cty.String)), - false, - }, - { - []cty.Value{ - cty.StringVal("hello"), - cty.StringVal("world"), - cty.StringVal("what's"), - cty.StringVal("up"), - }, - cty.MapVal(map[string]cty.Value{ - "hello": cty.StringVal("world"), - "what's": cty.StringVal("up"), - }), - false, - }, - { - []cty.Value{ - cty.StringVal("hello"), - cty.NumberIntVal(1), - cty.StringVal("goodbye"), - cty.NumberIntVal(42), - }, - cty.MapVal(map[string]cty.Value{ - "hello": cty.NumberIntVal(1), - "goodbye": cty.NumberIntVal(42), - }), - false, - }, - { // convert numbers to strings - []cty.Value{ - cty.StringVal("hello"), - cty.NumberIntVal(1), - cty.StringVal("goodbye"), - cty.StringVal("42"), - }, - 
cty.MapVal(map[string]cty.Value{ - "hello": cty.StringVal("1"), - "goodbye": cty.StringVal("42"), - }), - false, - }, - { // convert number keys to strings - []cty.Value{ - cty.NumberIntVal(1), - cty.StringVal("hello"), - cty.NumberIntVal(2), - cty.StringVal("goodbye"), - }, - cty.MapVal(map[string]cty.Value{ - "1": cty.StringVal("hello"), - "2": cty.StringVal("goodbye"), - }), - false, - }, - { // map of lists is okay - []cty.Value{ - cty.StringVal("hello"), - cty.ListVal([]cty.Value{ - cty.StringVal("world"), - }), - cty.StringVal("what's"), - cty.ListVal([]cty.Value{ - cty.StringVal("up"), - }), - }, - cty.MapVal(map[string]cty.Value{ - "hello": cty.ListVal([]cty.Value{cty.StringVal("world")}), - "what's": cty.ListVal([]cty.Value{cty.StringVal("up")}), - }), - false, - }, - { // map of maps is okay - []cty.Value{ - cty.StringVal("hello"), - cty.MapVal(map[string]cty.Value{ - "there": cty.StringVal("world"), - }), - cty.StringVal("what's"), - cty.MapVal(map[string]cty.Value{ - "really": cty.StringVal("up"), - }), - }, - cty.MapVal(map[string]cty.Value{ - "hello": cty.MapVal(map[string]cty.Value{ - "there": cty.StringVal("world"), - }), - "what's": cty.MapVal(map[string]cty.Value{ - "really": cty.StringVal("up"), - }), - }), - false, - }, - { // single argument returns an error - []cty.Value{ - cty.StringVal("hello"), - }, - cty.NilVal, - true, - }, - { // duplicate keys returns an error - []cty.Value{ - cty.StringVal("hello"), - cty.StringVal("world"), - cty.StringVal("hello"), - cty.StringVal("universe"), - }, - cty.NilVal, - true, - }, - { // null key returns an error - []cty.Value{ - cty.NullVal(cty.DynamicPseudoType), - cty.NumberIntVal(5), - }, - cty.NilVal, - true, - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("map(%#v)", test.Values), func(t *testing.T) { - got, err := Map(test.Values...) 
- if test.Err { - if err == nil { - t.Fatal("succeeded; want error") - } - if _, ok := err.(function.PanicError); ok { - t.Fatalf("unexpected panic: %s", err) - } - return - } else if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if !got.RawEquals(test.Want) { - t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) - } - }) - } -} - func TestMatchkeys(t *testing.T) { tests := []struct { Keys cty.Value @@ -1193,7 +997,7 @@ func TestSum(t *testing.T) { tests := []struct { List cty.Value Want cty.Value - Err bool + Err string }{ { cty.ListVal([]cty.Value{ @@ -1202,7 +1006,7 @@ func TestSum(t *testing.T) { cty.NumberIntVal(3), }), cty.NumberIntVal(6), - false, + "", }, { cty.ListVal([]cty.Value{ @@ -1213,7 +1017,7 @@ func TestSum(t *testing.T) { cty.NumberIntVal(234), }), cty.NumberIntVal(66685532), - false, + "", }, { cty.ListVal([]cty.Value{ @@ -1222,7 +1026,7 @@ func TestSum(t *testing.T) { cty.StringVal("c"), }), cty.UnknownVal(cty.String), - true, + "argument must be list, set, or tuple of number values", }, { cty.ListVal([]cty.Value{ @@ -1231,7 +1035,7 @@ func TestSum(t *testing.T) { cty.NumberIntVal(5), }), cty.NumberIntVal(-4), - false, + "", }, { cty.ListVal([]cty.Value{ @@ -1240,7 +1044,7 @@ func TestSum(t *testing.T) { cty.NumberFloatVal(5.7), }), cty.NumberFloatVal(35.3), - false, + "", }, { cty.ListVal([]cty.Value{ @@ -1249,12 +1053,20 @@ func TestSum(t *testing.T) { cty.NumberFloatVal(-5.7), }), cty.NumberFloatVal(-35.3), - false, + "", }, { cty.ListVal([]cty.Value{cty.NullVal(cty.Number)}), cty.NilVal, - true, + "argument must be list, set, or tuple of number values", + }, + { + cty.ListVal([]cty.Value{ + cty.NumberIntVal(5), + cty.NullVal(cty.Number), + }), + cty.NilVal, + "argument must be list, set, or tuple of number values", }, { cty.SetVal([]cty.Value{ @@ -1263,7 +1075,7 @@ func TestSum(t *testing.T) { cty.StringVal("c"), }), cty.UnknownVal(cty.String), - true, + "argument must be list, set, or tuple of number values", }, { 
cty.SetVal([]cty.Value{ @@ -1272,7 +1084,7 @@ func TestSum(t *testing.T) { cty.NumberIntVal(5), }), cty.NumberIntVal(-4), - false, + "", }, { cty.SetVal([]cty.Value{ @@ -1281,7 +1093,7 @@ func TestSum(t *testing.T) { cty.NumberIntVal(30), }), cty.NumberIntVal(65), - false, + "", }, { cty.SetVal([]cty.Value{ @@ -1290,14 +1102,14 @@ func TestSum(t *testing.T) { cty.NumberFloatVal(3), }), cty.NumberFloatVal(2354), - false, + "", }, { cty.SetVal([]cty.Value{ cty.NumberFloatVal(2), }), cty.NumberFloatVal(2), - false, + "", }, { cty.SetVal([]cty.Value{ @@ -1308,7 +1120,7 @@ func TestSum(t *testing.T) { cty.NumberFloatVal(-4), }), cty.NumberFloatVal(-199), - false, + "", }, { cty.TupleVal([]cty.Value{ @@ -1317,27 +1129,53 @@ func TestSum(t *testing.T) { cty.NumberIntVal(38), }), cty.UnknownVal(cty.String), - true, + "argument must be list, set, or tuple of number values", }, { cty.NumberIntVal(12), cty.NilVal, - true, + "cannot sum noniterable", }, { cty.ListValEmpty(cty.Number), cty.NilVal, - true, + "cannot sum an empty list", }, { cty.MapVal(map[string]cty.Value{"hello": cty.True}), cty.NilVal, - true, + "argument must be list, set, or tuple. 
Received map of bool", }, { cty.UnknownVal(cty.Number), cty.UnknownVal(cty.Number), - false, + "", + }, + { + cty.UnknownVal(cty.List(cty.Number)), + cty.UnknownVal(cty.Number), + "", + }, + { // known list containing unknown values + cty.ListVal([]cty.Value{cty.UnknownVal(cty.Number)}), + cty.UnknownVal(cty.Number), + "", + }, + { // numbers too large to represent as float64 + cty.ListVal([]cty.Value{ + cty.MustParseNumberVal("1e+500"), + cty.MustParseNumberVal("1e+500"), + }), + cty.MustParseNumberVal("2e+500"), + "", + }, + { // edge case we have a special error handler for + cty.ListVal([]cty.Value{ + cty.NumberFloatVal(math.Inf(1)), + cty.NumberFloatVal(math.Inf(-1)), + }), + cty.NilVal, + "can't compute sum of opposing infinities", }, } @@ -1345,9 +1183,11 @@ func TestSum(t *testing.T) { t.Run(fmt.Sprintf("sum(%#v)", test.List), func(t *testing.T) { got, err := Sum(test.List) - if test.Err { + if test.Err != "" { if err == nil { t.Fatal("succeeded; want error") + } else if got, want := err.Error(), test.Err; got != want { + t.Fatalf("wrong error\n got: %s\nwant: %s", got, want) } return } else if err != nil { diff --git a/lang/funcs/defaults.go b/lang/funcs/defaults.go new file mode 100644 index 000000000..0366ea661 --- /dev/null +++ b/lang/funcs/defaults.go @@ -0,0 +1,255 @@ +package funcs + +import ( + "fmt" + + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// DefaultsFunc is a helper function for substituting default values in +// place of null values in a given data structure. +// +// See the documentation for function Defaults for more information. 
+var DefaultsFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "input", + Type: cty.DynamicPseudoType, + AllowNull: true, + }, + { + Name: "defaults", + Type: cty.DynamicPseudoType, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + // The result type is guaranteed to be the same as the input type, + // since all we're doing is replacing null values with non-null + // values of the same type. + retType := args[0].Type() + defaultsType := args[1].Type() + + // This function is aimed at filling in object types or collections + // of object types where some of the attributes might be null, so + // it doesn't make sense to use a primitive type directly with it. + // (The "coalesce" function may be appropriate for such cases.) + if retType.IsPrimitiveType() { + // This error message is a bit of a fib because we can actually + // apply defaults to tuples too, but we expect that to be so + // unusual as to not be worth mentioning here, because mentioning + // it would require using some less-well-known Terraform language + // terminology in the message (tuple types, structural types). + return cty.DynamicPseudoType, function.NewArgErrorf(1, "only object types and collections of object types can have defaults applied") + } + + defaultsPath := make(cty.Path, 0, 4) // some capacity so that most structures won't reallocate + if err := defaultsAssertSuitableFallback(retType, defaultsType, defaultsPath); err != nil { + errMsg := tfdiags.FormatError(err) // add attribute path prefix + return cty.DynamicPseudoType, function.NewArgErrorf(1, "%s", errMsg) + } + + return retType, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if args[0].Type().HasDynamicTypes() { + // If the types our input object aren't known yet for some reason + // then we'll defer all of our work here, because our + // interpretation of the defaults depends on the types in + // the input. 
+ return cty.UnknownVal(retType), nil + } + + v := defaultsApply(args[0], args[1]) + return v, nil + }, +}) + +func defaultsApply(input, fallback cty.Value) cty.Value { + wantTy := input.Type() + if !(input.IsKnown() && fallback.IsKnown()) { + return cty.UnknownVal(wantTy) + } + + // For the rest of this function we're assuming that the given defaults + // will always be valid, because we expect to have caught any problems + // during the type checking phase. Any inconsistencies that reach here are + // therefore considered to be implementation bugs, and so will panic. + + // Our strategy depends on the kind of type we're working with. + switch { + case wantTy.IsPrimitiveType(): + // For leaf primitive values the rule is relatively simple: use the + // input if it's non-null, or fallback if input is null. + if !input.IsNull() { + return input + } + v, err := convert.Convert(fallback, wantTy) + if err != nil { + // Should not happen because we checked in defaultsAssertSuitableFallback + panic(err.Error()) + } + return v + + case wantTy.IsObjectType(): + atys := wantTy.AttributeTypes() + ret := map[string]cty.Value{} + for attr, aty := range atys { + inputSub := input.GetAttr(attr) + fallbackSub := cty.NullVal(aty) + if fallback.Type().HasAttribute(attr) { + fallbackSub = fallback.GetAttr(attr) + } + ret[attr] = defaultsApply(inputSub, fallbackSub) + } + return cty.ObjectVal(ret) + + case wantTy.IsTupleType(): + l := wantTy.Length() + ret := make([]cty.Value, l) + for i := 0; i < l; i++ { + inputSub := input.Index(cty.NumberIntVal(int64(i))) + fallbackSub := fallback.Index(cty.NumberIntVal(int64(i))) + ret[i] = defaultsApply(inputSub, fallbackSub) + } + return cty.TupleVal(ret) + + case wantTy.IsCollectionType(): + // For collection types we apply a single fallback value to each + // element of the input collection, because in the situations this + // function is intended for we assume that the number of elements + // is the caller's decision, and so we'll just apply 
the same defaults + // to all of the elements. + ety := wantTy.ElementType() + switch { + case wantTy.IsMapType(): + newVals := map[string]cty.Value{} + + for it := input.ElementIterator(); it.Next(); { + k, v := it.Element() + newVals[k.AsString()] = defaultsApply(v, fallback) + } + + if len(newVals) == 0 { + return cty.MapValEmpty(ety) + } + return cty.MapVal(newVals) + case wantTy.IsListType(), wantTy.IsSetType(): + var newVals []cty.Value + + for it := input.ElementIterator(); it.Next(); { + _, v := it.Element() + newV := defaultsApply(v, fallback) + newVals = append(newVals, newV) + } + + if len(newVals) == 0 { + if wantTy.IsSetType() { + return cty.SetValEmpty(ety) + } + return cty.ListValEmpty(ety) + } + if wantTy.IsSetType() { + return cty.SetVal(newVals) + } + return cty.ListVal(newVals) + default: + // There are no other collection types, so this should not happen + panic(fmt.Sprintf("invalid collection type %#v", wantTy)) + } + default: + // We should've caught anything else in defaultsAssertSuitableFallback, + // so this should not happen. + panic(fmt.Sprintf("invalid target type %#v", wantTy)) + } +} + +func defaultsAssertSuitableFallback(wantTy, fallbackTy cty.Type, fallbackPath cty.Path) error { + // If the type we want is a collection type then we need to keep peeling + // away collection type wrappers until we find the non-collection-type + // that's underneath, which is what the fallback will actually be applied + // to. + inCollection := false + for wantTy.IsCollectionType() { + wantTy = wantTy.ElementType() + inCollection = true + } + + switch { + case wantTy.IsPrimitiveType(): + // The fallback is valid if it's equal to or convertible to what we want. 
+ if fallbackTy.Equals(wantTy) { + return nil + } + conversion := convert.GetConversionUnsafe(fallbackTy, wantTy) + if conversion == nil { + msg := convert.MismatchMessage(fallbackTy, wantTy) + return fallbackPath.NewErrorf("invalid default value for %s: %s", wantTy.FriendlyName(), msg) + } + return nil + case wantTy.IsObjectType(): + if !fallbackTy.IsObjectType() { + if inCollection { + return fallbackPath.NewErrorf("the default value for a collection of an object type must itself be an object type, not %s", fallbackTy.FriendlyName()) + } + return fallbackPath.NewErrorf("the default value for an object type must itself be an object type, not %s", fallbackTy.FriendlyName()) + } + for attr, wantAty := range wantTy.AttributeTypes() { + if !fallbackTy.HasAttribute(attr) { + continue // it's always okay to not have a default value + } + fallbackSubpath := fallbackPath.GetAttr(attr) + fallbackSubTy := fallbackTy.AttributeType(attr) + err := defaultsAssertSuitableFallback(wantAty, fallbackSubTy, fallbackSubpath) + if err != nil { + return err + } + } + for attr := range fallbackTy.AttributeTypes() { + if !wantTy.HasAttribute(attr) { + fallbackSubpath := fallbackPath.GetAttr(attr) + return fallbackSubpath.NewErrorf("target type does not expect an attribute named %q", attr) + } + } + return nil + case wantTy.IsTupleType(): + if !fallbackTy.IsTupleType() { + if inCollection { + return fallbackPath.NewErrorf("the default value for a collection of a tuple type must itself be a tuple type, not %s", fallbackTy.FriendlyName()) + } + return fallbackPath.NewErrorf("the default value for a tuple type must itself be a tuple type, not %s", fallbackTy.FriendlyName()) + } + wantEtys := wantTy.TupleElementTypes() + fallbackEtys := fallbackTy.TupleElementTypes() + if got, want := len(wantEtys), len(fallbackEtys); got != want { + return fallbackPath.NewErrorf("the default value for a tuple type of length %d must also have length %d, not %d", want, want, got) + } + for i := 0; i < 
len(wantEtys); i++ { + fallbackSubpath := fallbackPath.IndexInt(i) + wantSubTy := wantEtys[i] + fallbackSubTy := fallbackEtys[i] + err := defaultsAssertSuitableFallback(wantSubTy, fallbackSubTy, fallbackSubpath) + if err != nil { + return err + } + } + return nil + default: + // No other types are supported right now. + return fallbackPath.NewErrorf("cannot apply defaults to %s", wantTy.FriendlyName()) + } +} + +// Defaults is a helper function for substituting default values in +// place of null values in a given data structure. +// +// This is primarily intended for use with a module input variable that +// has an object type constraint (or a collection thereof) that has optional +// attributes, so that the receiver of a value that omits those attributes +// can insert non-null default values in place of the null values caused by +// omitting the attributes. +func Defaults(input, defaults cty.Value) (cty.Value, error) { + return DefaultsFunc.Call([]cty.Value{input, defaults}) +} diff --git a/lang/funcs/defaults_test.go b/lang/funcs/defaults_test.go new file mode 100644 index 000000000..ca0c3be65 --- /dev/null +++ b/lang/funcs/defaults_test.go @@ -0,0 +1,396 @@ +package funcs + +import ( + "fmt" + "testing" + + "github.com/zclconf/go-cty/cty" +) + +func TestDefaults(t *testing.T) { + tests := []struct { + Input, Defaults cty.Value + Want cty.Value + WantErr string + }{ + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hey"), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hey"), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + }), + 
Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{}), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{}), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.String), + }), + WantErr: `.a: target type does not expect an attribute named "a"`, + }, + + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.NullVal(cty.String), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + }), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.NullVal(cty.String), + cty.StringVal("hey"), + cty.NullVal(cty.String), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + cty.StringVal("hey"), + cty.StringVal("hello"), + }), + }), + }, + { + // Using defaults with single set elements is a pretty + // odd thing to do, but this behavior is just here because + // it generalizes from how we handle collections. It's + // tested only to ensure it doesn't change accidentally + // in future. 
+ Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetVal([]cty.Value{ + cty.NullVal(cty.String), + cty.StringVal("hey"), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetVal([]cty.Value{ + cty.StringVal("hey"), + cty.StringVal("hello"), + }), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "x": cty.NullVal(cty.String), + "y": cty.StringVal("hey"), + "z": cty.NullVal(cty.String), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "x": cty.StringVal("hello"), + "y": cty.StringVal("hey"), + "z": cty.StringVal("hello"), + }), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + }), + }), + }, + { + Input: cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + Want: cty.ListVal([]cty.Value{ + 
cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("boop"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("boop"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + }), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.NullVal(cty.String), + }), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.SetVal([]cty.Value{ + // After applying defaults, the one with a null value + // coalesced with the one with a non-null value, + // and so there's only one left. 
+ cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + }), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + "beep": cty.ObjectVal(map[string]cty.Value{ + "b": cty.NullVal(cty.String), + }), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "boop": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + "beep": cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hello"), + }), + }), + }), + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.NullVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "b": cty.StringVal("hey"), + }), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + WantErr: `.a: the default value for a collection of an object type must itself be an object type, not string`, + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.ListVal([]cty.Value{ + cty.NullVal(cty.String), + cty.StringVal("hey"), + cty.NullVal(cty.String), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + // The default value for a list must be a single value + // of the list's element type which provides defaults + // for each element separately, so the default for a + // list of string should be just a single string, not + // a list of string. 
+ "a": cty.ListVal([]cty.Value{ + cty.StringVal("hello"), + }), + }), + WantErr: `.a: invalid default value for string: string required`, + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.TupleVal([]cty.Value{ + cty.NullVal(cty.String), + cty.StringVal("hey"), + cty.NullVal(cty.String), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.StringVal("hello"), + }), + WantErr: `.a: the default value for a tuple type must itself be a tuple type, not string`, + }, + { + Input: cty.ObjectVal(map[string]cty.Value{ + "a": cty.TupleVal([]cty.Value{ + cty.NullVal(cty.String), + cty.StringVal("hey"), + cty.NullVal(cty.String), + }), + }), + Defaults: cty.ObjectVal(map[string]cty.Value{ + "a": cty.TupleVal([]cty.Value{ + cty.StringVal("hello 0"), + cty.StringVal("hello 1"), + cty.StringVal("hello 2"), + }), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "a": cty.TupleVal([]cty.Value{ + cty.StringVal("hello 0"), + cty.StringVal("hey"), + cty.StringVal("hello 2"), + }), + }), + }, + { + // There's no reason to use this function for plain primitive + // types, because the "default" argument in a variable definition + // already has the equivalent behavior. This function is only + // to deal with the situation of a complex-typed variable where + // only parts of the data structure are optional. 
+ Input: cty.NullVal(cty.String), + Defaults: cty.StringVal("hello"), + WantErr: `only object types and collections of object types can have defaults applied`, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("defaults(%#v, %#v)", test.Input, test.Defaults), func(t *testing.T) { + got, gotErr := Defaults(test.Input, test.Defaults) + + if test.WantErr != "" { + if gotErr == nil { + t.Fatalf("unexpected success\nwant error: %s", test.WantErr) + } + if got, want := gotErr.Error(), test.WantErr; got != want { + t.Fatalf("wrong error\ngot: %s\nwant: %s", got, want) + } + return + } else if gotErr != nil { + t.Fatalf("unexpected error\ngot: %s", gotErr.Error()) + } + + if !test.Want.RawEquals(got) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} diff --git a/lang/functions.go b/lang/functions.go index d48cace9b..3a604f7c5 100644 --- a/lang/functions.go +++ b/lang/functions.go @@ -9,6 +9,7 @@ import ( "github.com/zclconf/go-cty/cty/function" "github.com/zclconf/go-cty/cty/function/stdlib" + "github.com/hashicorp/terraform/experiments" "github.com/hashicorp/terraform/lang/funcs" ) @@ -55,6 +56,7 @@ func (s *Scope) Functions() map[string]function.Function { "concat": stdlib.ConcatFunc, "contains": stdlib.ContainsFunc, "csvdecode": stdlib.CSVDecodeFunc, + "defaults": s.experimentalFunction(experiments.ModuleVariableOptionalAttrs, funcs.DefaultsFunc), "dirname": funcs.DirnameFunc, "distinct": stdlib.DistinctFunc, "element": stdlib.ElementFunc, @@ -160,11 +162,31 @@ func (s *Scope) Functions() map[string]function.Function { return s.funcs } -var unimplFunc = function.New(&function.Spec{ - Type: func([]cty.Value) (cty.Type, error) { - return cty.DynamicPseudoType, fmt.Errorf("function not yet implemented") - }, - Impl: func([]cty.Value, cty.Type) (cty.Value, error) { - return cty.DynamicVal, fmt.Errorf("function not yet implemented") - }, -}) +// experimentalFunction checks whether the given experiment is enabled for +// the 
recieving scope. If so, it will return the given function verbatim. +// If not, it will return a placeholder function that just returns an +// error explaining that the function requires the experiment to be enabled. +func (s *Scope) experimentalFunction(experiment experiments.Experiment, fn function.Function) function.Function { + if s.activeExperiments.Has(experiment) { + return fn + } + + err := fmt.Errorf( + "this function is experimental and available only when the experiment keyword %s is enabled for the current module", + experiment.Keyword(), + ) + + return function.New(&function.Spec{ + Params: fn.Params(), + VarParam: fn.VarParam(), + Type: func(args []cty.Value) (cty.Type, error) { + return cty.DynamicPseudoType, err + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + // It would be weird to get here because the Type function always + // fails, but we'll return an error here too anyway just to be + // robust. + return cty.DynamicVal, err + }, + }) +} diff --git a/lang/functions_test.go b/lang/functions_test.go index 4690a4a05..8f3950eb4 100644 --- a/lang/functions_test.go +++ b/lang/functions_test.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/terraform/experiments" homedir "github.com/mitchellh/go-homedir" "github.com/zclconf/go-cty/cty" ) @@ -224,7 +225,7 @@ func TestFunctions(t *testing.T) { "coalescelist": { { - `coalescelist(list("a", "b"), list("c", "d"))`, + `coalescelist(tolist(["a", "b"]), tolist(["c", "d"]))`, cty.ListVal([]cty.Value{ cty.StringVal("a"), cty.StringVal("b"), @@ -289,6 +290,18 @@ func TestFunctions(t *testing.T) { }, }, + "defaults": { + // This function is pretty specialized and so this is mainly + // just a test that it is defined at all. See the function's + // own unit tests for more interesting test cases. 
+ { + `defaults({a: 4}, {a: 5})`, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NumberIntVal(4), + }), + }, + }, + "dirname": { { `dirname("testdata/hello.txt")`, @@ -512,12 +525,8 @@ func TestFunctions(t *testing.T) { }, "list": { - { - `list("hello")`, - cty.ListVal([]cty.Value{ - cty.StringVal("hello"), - }), - }, + // There are intentionally no test cases for "list" because + // it is a stub that always returns an error. }, "log": { @@ -542,12 +551,8 @@ func TestFunctions(t *testing.T) { }, "map": { - { - `map("hello", "world")`, - cty.MapVal(map[string]cty.Value{ - "hello": cty.StringVal("world"), - }), - }, + // There are intentionally no test cases for "map" because + // it is a stub that always returns an error. }, "matchkeys": { @@ -759,7 +764,7 @@ func TestFunctions(t *testing.T) { "slice": { { // force a list type here for testing - `slice(list("a", "b", "c", "d"), 1, 3)`, + `slice(tolist(["a", "b", "c", "d"]), 1, 3)`, cty.ListVal([]cty.Value{ cty.StringVal("b"), cty.StringVal("c"), }), @@ -1047,32 +1052,89 @@ func TestFunctions(t *testing.T) { }, } - data := &dataForTests{} // no variables available; we only need literals here - scope := &Scope{ - Data: data, - BaseDir: "./testdata/functions-test", // for the functions that read from the filesystem - } + experimentalFuncs := map[string]experiments.Experiment{} + experimentalFuncs["defaults"] = experiments.ModuleVariableOptionalAttrs - // Check that there is at least one test case for each function, omitting - // those functions that do not return consistent values - allFunctions := scope.Functions() - - // TODO: we can test the impure functions partially by configuring the scope - // with PureOnly: true and then verify that they return unknown values of a - // suitable type. 
- for _, impureFunc := range impureFunctions { - delete(allFunctions, impureFunc) - } - for f, _ := range scope.Functions() { - if _, ok := tests[f]; !ok { - t.Errorf("Missing test for function %s\n", f) + t.Run("all functions are tested", func(t *testing.T) { + data := &dataForTests{} // no variables available; we only need literals here + scope := &Scope{ + Data: data, + BaseDir: "./testdata/functions-test", // for the functions that read from the filesystem } - } + + // Check that there is at least one test case for each function, omitting + // those functions that do not return consistent values + allFunctions := scope.Functions() + + // TODO: we can test the impure functions partially by configuring the scope + // with PureOnly: true and then verify that they return unknown values of a + // suitable type. + for _, impureFunc := range impureFunctions { + delete(allFunctions, impureFunc) + } + for f := range scope.Functions() { + if _, ok := tests[f]; !ok { + t.Errorf("Missing test for function %s\n", f) + } + } + }) for funcName, funcTests := range tests { t.Run(funcName, func(t *testing.T) { + + // prepareScope starts as a no-op, but if a function is marked as + // experimental in our experimentalFuncs table above then we'll + // reassign this to be a function that activates the appropriate + // experiment. + prepareScope := func(t *testing.T, scope *Scope) {} + + if experiment, isExperimental := experimentalFuncs[funcName]; isExperimental { + // First, we'll run all of the tests without the experiment + // enabled to see that they do actually fail in that case. 
+ for _, test := range funcTests { + testName := fmt.Sprintf("experimental(%s)", test.src) + t.Run(testName, func(t *testing.T) { + data := &dataForTests{} // no variables available; we only need literals here + scope := &Scope{ + Data: data, + BaseDir: "./testdata/functions-test", // for the functions that read from the filesystem + } + + expr, parseDiags := hclsyntax.ParseExpression([]byte(test.src), "test.hcl", hcl.Pos{Line: 1, Column: 1}) + if parseDiags.HasErrors() { + for _, diag := range parseDiags { + t.Error(diag.Error()) + } + return + } + + _, diags := scope.EvalExpr(expr, cty.DynamicPseudoType) + if !diags.HasErrors() { + t.Errorf("experimental function %q succeeded without its experiment %s enabled\nexpr: %s", funcName, experiment.Keyword(), test.src) + } + }) + } + + // Now make the experiment active in the scope so that the + // function will actually work when we test it below. + prepareScope = func(t *testing.T, scope *Scope) { + t.Helper() + t.Logf("activating experiment %s to test %q", experiment.Keyword(), funcName) + experimentsSet := experiments.NewSet() + experimentsSet.Add(experiment) + scope.SetActiveExperiments(experimentsSet) + } + } + for _, test := range funcTests { t.Run(test.src, func(t *testing.T) { + data := &dataForTests{} // no variables available; we only need literals here + scope := &Scope{ + Data: data, + BaseDir: "./testdata/functions-test", // for the functions that read from the filesystem + } + prepareScope(t, scope) + expr, parseDiags := hclsyntax.ParseExpression([]byte(test.src), "test.hcl", hcl.Pos{Line: 1, Column: 1}) if parseDiags.HasErrors() { for _, diag := range parseDiags { diff --git a/lang/scope.go b/lang/scope.go index 98fca6baa..103d2529c 100644 --- a/lang/scope.go +++ b/lang/scope.go @@ -6,6 +6,7 @@ import ( "github.com/zclconf/go-cty/cty/function" "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/experiments" ) // Scope is the main type in this package, allowing dynamic evaluation of 
@@ -31,4 +32,16 @@ type Scope struct { funcs map[string]function.Function funcsLock sync.Mutex + + // activeExperiments is an optional set of experiments that should be + // considered as active in the module that this scope will be used for. + // Callers can populate it by calling the SetActiveExperiments method. + activeExperiments experiments.Set +} + +// SetActiveExperiments allows a caller to declare that a set of experiments +// is active for the module that the receiving Scope belongs to, which might +// then cause the scope to activate some additional experimental behaviors. +func (s *Scope) SetActiveExperiments(active experiments.Set) { + s.activeExperiments = active } diff --git a/main.go b/main.go index 183698408..12b13d169 100644 --- a/main.go +++ b/main.go @@ -17,7 +17,9 @@ import ( "github.com/hashicorp/terraform/command/cliconfig" "github.com/hashicorp/terraform/command/format" "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/internal/didyoumean" "github.com/hashicorp/terraform/internal/logging" + "github.com/hashicorp/terraform/internal/terminal" "github.com/hashicorp/terraform/version" "github.com/mattn/go-shellwords" "github.com/mitchellh/cli" @@ -33,8 +35,25 @@ const ( // The parent process will create a file to collect crash logs envTmpLogPath = "TF_TEMP_LOG_PATH" + + // Environment variable name used for smuggling true stderr terminal + // settings into a panicwrap child process. This is an implementation + // detail, subject to change in future, and should not ever be directly + // set by an end-user. + envTerminalPanicwrapWorkaround = "TF_PANICWRAP_STDERR" ) +// ui wraps the primary output cli.Ui, and redirects Warn calls to Output +// calls. This ensures that warnings are sent to stdout, and are properly +// serialized within the stdout stream. 
+type ui struct {
+	cli.Ui
+}
+
+func (u *ui) Warn(msg string) {
+	u.Ui.Output(msg)
+}
+
 func main() {
 	os.Exit(realMain())
 }
@@ -52,7 +71,7 @@ func realMain() int {
 	// there is a panic. Otherwise, we delete it.
 	logTempFile, err := ioutil.TempFile("", "terraform-log")
 	if err != nil {
-		fmt.Fprintf(os.Stderr, "Couldn't setup logging tempfile: %s", err)
+		fmt.Fprintf(os.Stderr, "Couldn't set up logging tempfile: %s", err)
 		return 1
 	}
 	// Now that we have the file, close it and leave it for the wrapped
@@ -63,6 +82,22 @@ func realMain() int {
 	// store the path in the environment for the wrapped executable
 	os.Setenv(envTmpLogPath, logTempFile.Name())
 
+	// We also need to do our terminal initialization before we fork,
+	// because the child process doesn't necessarily have access to
+	// the true stderr in order to initialize it.
+	streams, err := terminal.Init()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to initialize terminal: %s", err)
+		return 1
+	}
+
+	// We need the child process to behave _as if_ connected to the real
+	// stderr, even though panicwrap is about to add a pipe in the way,
+	// so we'll smuggle the true stderr information in an environment
+	// variable. 
+	streamState := streams.StateForAfterPanicWrap()
+	os.Setenv(envTerminalPanicwrapWorkaround, fmt.Sprintf("%t:%d", streamState.StderrIsTerminal, streamState.StderrWidth))
+
 	// Create the configuration for panicwrap and wrap our executable
 	wrapConfig.Handler = logging.PanicHandler(logTempFile.Name())
 	wrapConfig.IgnoreSignals = ignoreSignals
@@ -81,11 +116,11 @@ func realMain() int {
 }
 
 func init() {
-	Ui = &cli.BasicUi{
+	Ui = &ui{&cli.BasicUi{
 		Writer:      os.Stdout,
 		ErrorWriter: os.Stderr,
 		Reader:      os.Stdin,
-	}
+	}}
 }
 
 func wrappedMain() int {
@@ -105,11 +140,44 @@ func wrappedMain() int {
 	}
 
 	log.Printf(
-		"[INFO] Terraform version: %s %s %s",
-		Version, VersionPrerelease, GitCommit)
+		"[INFO] Terraform version: %s %s",
+		Version, VersionPrerelease)
 	log.Printf("[INFO] Go runtime version: %s", runtime.Version())
 	log.Printf("[INFO] CLI args: %#v", os.Args)
 
+	// This is the receiving end of our workaround to retain the metadata
+	// about the real stderr even though we're talking to it via the panicwrap
+	// pipe. See the call to StateForAfterPanicWrap above for the producer
+	// part of this. 
+ var streamState *terminal.PrePanicwrapState + if raw := os.Getenv(envTerminalPanicwrapWorkaround); raw != "" { + streamState = &terminal.PrePanicwrapState{} + if _, err := fmt.Sscanf(raw, "%t:%d", &streamState.StderrIsTerminal, &streamState.StderrWidth); err != nil { + log.Printf("[WARN] %s is set but is incorrectly-formatted: %s", envTerminalPanicwrapWorkaround, err) + streamState = nil // leave it unset for a normal init, then + } + } + streams, err := terminal.ReinitInsidePanicwrap(streamState) + if err != nil { + Ui.Error(fmt.Sprintf("Failed to configure the terminal: %s", err)) + return 1 + } + if streams.Stdout.IsTerminal() { + log.Printf("[TRACE] Stdout is a terminal of width %d", streams.Stdout.Columns()) + } else { + log.Printf("[TRACE] Stdout is not a terminal") + } + if streams.Stderr.IsTerminal() { + log.Printf("[TRACE] Stderr is a terminal of width %d", streams.Stderr.Columns()) + } else { + log.Printf("[TRACE] Stderr is not a terminal") + } + if streams.Stdin.IsTerminal() { + log.Printf("[TRACE] Stdin is a terminal") + } else { + log.Printf("[TRACE] Stdin is not a terminal") + } + // NOTE: We're intentionally calling LoadConfig _before_ handling a possible // -chdir=... option on the command line, so that a possible relative // path in the TERRAFORM_CONFIG_FILE environment variable (though probably @@ -223,7 +291,7 @@ func wrappedMain() int { // in case they need to refer back to it for any special reason, though // they should primarily be working with the override working directory // that we've now switched to above. - initCommands(originalWd, config, services, providerSrc, providerDevOverrides, unmanagedProviders) + initCommands(originalWd, streams, config, services, providerSrc, providerDevOverrides, unmanagedProviders) } // Run checkpoint @@ -282,6 +350,32 @@ func wrappedMain() int { AutocompleteUninstall: "uninstall-autocomplete", } + // Before we continue we'll check whether the requested command is + // actually known. 
If not, we might be able to suggest an alternative + // if it seems like the user made a typo. + // (This bypasses the built-in help handling in cli.CLI for the situation + // where a command isn't found, because it's likely more helpful to + // mention what specifically went wrong, rather than just printing out + // a big block of usage information.) + if cmd := cliRunner.Subcommand(); cmd != "" { + // Due to the design of cli.CLI, this special error message only works + // for typos of top-level commands. For a subcommand typo, like + // "terraform state posh", cmd would be "state" here and thus would + // be considered to exist, and it would print out its own usage message. + if _, exists := Commands[cmd]; !exists { + suggestions := make([]string, 0, len(Commands)) + for name := range Commands { + suggestions = append(suggestions, name) + } + suggestion := didyoumean.NameSuggestion(cmd, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + fmt.Fprintf(os.Stderr, "Terraform has no command named %q.%s\n\nTo see all of Terraform's top-level commands, run:\n terraform -help\n\n", cmd, suggestion) + return 1 + } + } + exitCode, err := cliRunner.Run() if err != nil { Ui.Error(fmt.Sprintf("Error executing CLI: %s", err.Error())) diff --git a/main_test.go b/main_test.go index 406195e8b..321ce039f 100644 --- a/main_test.go +++ b/main_test.go @@ -10,14 +10,14 @@ import ( ) func TestMain_cliArgsFromEnv(t *testing.T) { - // Setup the state. This test really messes with the environment and + // Set up the state. This test really messes with the environment and // global state so we set things up to be restored. 
// Restore original CLI args oldArgs := os.Args defer func() { os.Args = oldArgs }() - // Setup test command and restore that + // Set up test command and restore that Commands = make(map[string]cli.CommandFactory) defer func() { Commands = nil @@ -122,7 +122,7 @@ func TestMain_cliArgsFromEnv(t *testing.T) { } } - // Setup the args + // Set up the args args := make([]string, len(tc.Args)+1) args[0] = oldArgs[0] // process name copy(args[1:], tc.Args) @@ -153,7 +153,7 @@ func TestMain_cliArgsFromEnvAdvanced(t *testing.T) { oldArgs := os.Args defer func() { os.Args = oldArgs }() - // Setup test command and restore that + // Set up test command and restore that Commands = make(map[string]cli.CommandFactory) defer func() { Commands = nil @@ -211,7 +211,7 @@ func TestMain_cliArgsFromEnvAdvanced(t *testing.T) { for i, tc := range cases { t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { - // Setup test command and restore that + // Set up test command and restore that testCommandName := tc.Command testCommand := &testCommandCLI{} defer func() { delete(Commands, testCommandName) }() @@ -229,7 +229,7 @@ func TestMain_cliArgsFromEnvAdvanced(t *testing.T) { } } - // Setup the args + // Set up the args args := make([]string, len(tc.Args)+1) args[0] = oldArgs[0] // process name copy(args[1:], tc.Args) @@ -264,3 +264,20 @@ func (c *testCommandCLI) Run(args []string) int { func (c *testCommandCLI) Synopsis() string { return "" } func (c *testCommandCLI) Help() string { return "" } + +func TestWarnOutput(t *testing.T) { + mock := cli.NewMockUi() + wrapped := &ui{mock} + wrapped.Warn("WARNING") + + stderr := mock.ErrorWriter.String() + stdout := mock.OutputWriter.String() + + if stderr != "" { + t.Fatalf("unexpected stderr: %q", stderr) + } + + if stdout != "WARNING\n" { + t.Fatalf("unexpected stdout: %q\n", stdout) + } +} diff --git a/plans/changes.go b/plans/changes.go index 414099488..72b6a8938 100644 --- a/plans/changes.go +++ b/plans/changes.go @@ -39,7 +39,7 @@ 
func (c *Changes) Empty() bool { } for _, out := range c.Outputs { - if out.Action != NoOp { + if out.Addr.Module.IsRoot() && out.Action != NoOp { return false } } diff --git a/plans/dynamic_value_test.go b/plans/dynamic_value_test.go deleted file mode 100644 index bde22b12c..000000000 --- a/plans/dynamic_value_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package plans - -import ( - "github.com/zclconf/go-cty/cty" -) - -func mustNewDynamicValue(val cty.Value, ty cty.Type) DynamicValue { - ret, err := NewDynamicValue(val, ty) - if err != nil { - panic(err) - } - return ret -} diff --git a/plans/internal/planproto/planfile.pb.go b/plans/internal/planproto/planfile.pb.go index 950d08262..9fc4daf59 100644 --- a/plans/internal/planproto/planfile.pb.go +++ b/plans/internal/planproto/planfile.pb.go @@ -1,24 +1,29 @@ // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.14.0 // source: planfile.proto package planproto import ( - fmt "fmt" proto "github.com/golang/protobuf/proto" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // Action describes the type of action planned for an object. // Not all action values are valid for all object types. @@ -34,61 +39,107 @@ const ( Action_CREATE_THEN_DELETE Action = 7 ) -var Action_name = map[int32]string{ - 0: "NOOP", - 1: "CREATE", - 2: "READ", - 3: "UPDATE", - 5: "DELETE", - 6: "DELETE_THEN_CREATE", - 7: "CREATE_THEN_DELETE", -} +// Enum value maps for Action. +var ( + Action_name = map[int32]string{ + 0: "NOOP", + 1: "CREATE", + 2: "READ", + 3: "UPDATE", + 5: "DELETE", + 6: "DELETE_THEN_CREATE", + 7: "CREATE_THEN_DELETE", + } + Action_value = map[string]int32{ + "NOOP": 0, + "CREATE": 1, + "READ": 2, + "UPDATE": 3, + "DELETE": 5, + "DELETE_THEN_CREATE": 6, + "CREATE_THEN_DELETE": 7, + } +) -var Action_value = map[string]int32{ - "NOOP": 0, - "CREATE": 1, - "READ": 2, - "UPDATE": 3, - "DELETE": 5, - "DELETE_THEN_CREATE": 6, - "CREATE_THEN_DELETE": 7, +func (x Action) Enum() *Action { + p := new(Action) + *p = x + return p } func (x Action) String() string { - return proto.EnumName(Action_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } +func (Action) Descriptor() protoreflect.EnumDescriptor { + return file_planfile_proto_enumTypes[0].Descriptor() +} + +func (Action) Type() protoreflect.EnumType { + return &file_planfile_proto_enumTypes[0] +} + +func (x Action) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Action.Descriptor instead. 
func (Action) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_02431083a6706c5b, []int{0} + return file_planfile_proto_rawDescGZIP(), []int{0} } type ResourceInstanceChange_ResourceMode int32 const ( - ResourceInstanceChange_managed ResourceInstanceChange_ResourceMode = 0 - ResourceInstanceChange_data ResourceInstanceChange_ResourceMode = 1 + ResourceInstanceChange_managed ResourceInstanceChange_ResourceMode = 0 // for "resource" blocks in configuration + ResourceInstanceChange_data ResourceInstanceChange_ResourceMode = 1 // for "data" blocks in configuration ) -var ResourceInstanceChange_ResourceMode_name = map[int32]string{ - 0: "managed", - 1: "data", -} +// Enum value maps for ResourceInstanceChange_ResourceMode. +var ( + ResourceInstanceChange_ResourceMode_name = map[int32]string{ + 0: "managed", + 1: "data", + } + ResourceInstanceChange_ResourceMode_value = map[string]int32{ + "managed": 0, + "data": 1, + } +) -var ResourceInstanceChange_ResourceMode_value = map[string]int32{ - "managed": 0, - "data": 1, +func (x ResourceInstanceChange_ResourceMode) Enum() *ResourceInstanceChange_ResourceMode { + p := new(ResourceInstanceChange_ResourceMode) + *p = x + return p } func (x ResourceInstanceChange_ResourceMode) String() string { - return proto.EnumName(ResourceInstanceChange_ResourceMode_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } +func (ResourceInstanceChange_ResourceMode) Descriptor() protoreflect.EnumDescriptor { + return file_planfile_proto_enumTypes[1].Descriptor() +} + +func (ResourceInstanceChange_ResourceMode) Type() protoreflect.EnumType { + return &file_planfile_proto_enumTypes[1] +} + +func (x ResourceInstanceChange_ResourceMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResourceInstanceChange_ResourceMode.Descriptor instead. 
func (ResourceInstanceChange_ResourceMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_02431083a6706c5b, []int{3, 0} + return file_planfile_proto_rawDescGZIP(), []int{3, 0} } // Plan is the root message type for the tfplan file type Plan struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Version is incremented whenever there is a breaking change to // the serialization format. Programs reading serialized plans should // verify that version is set to the expected value and abort processing @@ -121,145 +172,157 @@ type Plan struct { ProviderHashes map[string]*Hash `protobuf:"bytes,15,rep,name=provider_hashes,json=providerHashes,proto3" json:"provider_hashes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Backend is a description of the backend configuration and other related // settings at the time the plan was created. - Backend *Backend `protobuf:"bytes,13,opt,name=backend,proto3" json:"backend,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Backend *Backend `protobuf:"bytes,13,opt,name=backend,proto3" json:"backend,omitempty"` } -func (m *Plan) Reset() { *m = Plan{} } -func (m *Plan) String() string { return proto.CompactTextString(m) } -func (*Plan) ProtoMessage() {} +func (x *Plan) Reset() { + *x = Plan{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Plan) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Plan) ProtoMessage() {} + +func (x *Plan) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + 
return mi.MessageOf(x) +} + +// Deprecated: Use Plan.ProtoReflect.Descriptor instead. func (*Plan) Descriptor() ([]byte, []int) { - return fileDescriptor_02431083a6706c5b, []int{0} + return file_planfile_proto_rawDescGZIP(), []int{0} } -func (m *Plan) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Plan.Unmarshal(m, b) -} -func (m *Plan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Plan.Marshal(b, m, deterministic) -} -func (m *Plan) XXX_Merge(src proto.Message) { - xxx_messageInfo_Plan.Merge(m, src) -} -func (m *Plan) XXX_Size() int { - return xxx_messageInfo_Plan.Size(m) -} -func (m *Plan) XXX_DiscardUnknown() { - xxx_messageInfo_Plan.DiscardUnknown(m) -} - -var xxx_messageInfo_Plan proto.InternalMessageInfo - -func (m *Plan) GetVersion() uint64 { - if m != nil { - return m.Version +func (x *Plan) GetVersion() uint64 { + if x != nil { + return x.Version } return 0 } -func (m *Plan) GetVariables() map[string]*DynamicValue { - if m != nil { - return m.Variables +func (x *Plan) GetVariables() map[string]*DynamicValue { + if x != nil { + return x.Variables } return nil } -func (m *Plan) GetResourceChanges() []*ResourceInstanceChange { - if m != nil { - return m.ResourceChanges +func (x *Plan) GetResourceChanges() []*ResourceInstanceChange { + if x != nil { + return x.ResourceChanges } return nil } -func (m *Plan) GetOutputChanges() []*OutputChange { - if m != nil { - return m.OutputChanges +func (x *Plan) GetOutputChanges() []*OutputChange { + if x != nil { + return x.OutputChanges } return nil } -func (m *Plan) GetTargetAddrs() []string { - if m != nil { - return m.TargetAddrs +func (x *Plan) GetTargetAddrs() []string { + if x != nil { + return x.TargetAddrs } return nil } -func (m *Plan) GetTerraformVersion() string { - if m != nil { - return m.TerraformVersion +func (x *Plan) GetTerraformVersion() string { + if x != nil { + return x.TerraformVersion } return "" } -func (m *Plan) GetProviderHashes() map[string]*Hash 
{ - if m != nil { - return m.ProviderHashes +func (x *Plan) GetProviderHashes() map[string]*Hash { + if x != nil { + return x.ProviderHashes } return nil } -func (m *Plan) GetBackend() *Backend { - if m != nil { - return m.Backend +func (x *Plan) GetBackend() *Backend { + if x != nil { + return x.Backend } return nil } // Backend is a description of backend configuration and other related settings. type Backend struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - Workspace string `protobuf:"bytes,3,opt,name=workspace,proto3" json:"workspace,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + Workspace string `protobuf:"bytes,3,opt,name=workspace,proto3" json:"workspace,omitempty"` } -func (m *Backend) Reset() { *m = Backend{} } -func (m *Backend) String() string { return proto.CompactTextString(m) } -func (*Backend) ProtoMessage() {} +func (x *Backend) Reset() { + *x = Backend{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Backend) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Backend) ProtoMessage() {} + +func (x *Backend) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
Backend.ProtoReflect.Descriptor instead. func (*Backend) Descriptor() ([]byte, []int) { - return fileDescriptor_02431083a6706c5b, []int{1} + return file_planfile_proto_rawDescGZIP(), []int{1} } -func (m *Backend) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Backend.Unmarshal(m, b) -} -func (m *Backend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Backend.Marshal(b, m, deterministic) -} -func (m *Backend) XXX_Merge(src proto.Message) { - xxx_messageInfo_Backend.Merge(m, src) -} -func (m *Backend) XXX_Size() int { - return xxx_messageInfo_Backend.Size(m) -} -func (m *Backend) XXX_DiscardUnknown() { - xxx_messageInfo_Backend.DiscardUnknown(m) -} - -var xxx_messageInfo_Backend proto.InternalMessageInfo - -func (m *Backend) GetType() string { - if m != nil { - return m.Type +func (x *Backend) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *Backend) GetConfig() *DynamicValue { - if m != nil { - return m.Config +func (x *Backend) GetConfig() *DynamicValue { + if x != nil { + return x.Config } return nil } -func (m *Backend) GetWorkspace() string { - if m != nil { - return m.Workspace +func (x *Backend) GetWorkspace() string { + if x != nil { + return x.Workspace } return "" } @@ -267,6 +330,10 @@ func (m *Backend) GetWorkspace() string { // Change represents a change made to some object, transforming it from an old // state to a new state. type Change struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Not all action values are valid for all object types. Consult // the documentation for any message that embeds Change. Action Action `protobuf:"varint,1,opt,name=action,proto3,enum=tfplan.Action" json:"action,omitempty"` @@ -279,52 +346,60 @@ type Change struct { // (or null, if no prior value exists) and the value that was or will be read, // respectively. 
// - For no-op, one value is provided that is left unmodified by this non-change. - Values []*DynamicValue `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Values []*DynamicValue `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"` } -func (m *Change) Reset() { *m = Change{} } -func (m *Change) String() string { return proto.CompactTextString(m) } -func (*Change) ProtoMessage() {} +func (x *Change) Reset() { + *x = Change{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Change) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Change) ProtoMessage() {} + +func (x *Change) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Change.ProtoReflect.Descriptor instead. 
func (*Change) Descriptor() ([]byte, []int) { - return fileDescriptor_02431083a6706c5b, []int{2} + return file_planfile_proto_rawDescGZIP(), []int{2} } -func (m *Change) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Change.Unmarshal(m, b) -} -func (m *Change) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Change.Marshal(b, m, deterministic) -} -func (m *Change) XXX_Merge(src proto.Message) { - xxx_messageInfo_Change.Merge(m, src) -} -func (m *Change) XXX_Size() int { - return xxx_messageInfo_Change.Size(m) -} -func (m *Change) XXX_DiscardUnknown() { - xxx_messageInfo_Change.DiscardUnknown(m) -} - -var xxx_messageInfo_Change proto.InternalMessageInfo - -func (m *Change) GetAction() Action { - if m != nil { - return m.Action +func (x *Change) GetAction() Action { + if x != nil { + return x.Action } return Action_NOOP } -func (m *Change) GetValues() []*DynamicValue { - if m != nil { - return m.Values +func (x *Change) GetValues() []*DynamicValue { + if x != nil { + return x.Values } return nil } type ResourceInstanceChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // module_path is an address to the module that defined this resource. // module_path is omitted for resources in the root module. For descendent modules // it is a string like module.foo.module.bar as would be seen at the beginning of a @@ -342,7 +417,7 @@ type ResourceInstanceChange struct { // attributes ("count" or "for_each") are being used for this resource. If none // are in use, this field is omitted. 
// - // Types that are valid to be assigned to InstanceKey: + // Types that are assignable to InstanceKey: // *ResourceInstanceChange_Str // *ResourceInstanceChange_Int InstanceKey isResourceInstanceChange_InstanceKey `protobuf_oneof:"instance_key"` @@ -366,65 +441,125 @@ type ResourceInstanceChange struct { // An unordered set of paths that prompted the change action to be // "replace" rather than "update". Empty for any action other than // "replace". - RequiredReplace []*Path `protobuf:"bytes,11,rep,name=required_replace,json=requiredReplace,proto3" json:"required_replace,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + RequiredReplace []*Path `protobuf:"bytes,11,rep,name=required_replace,json=requiredReplace,proto3" json:"required_replace,omitempty"` } -func (m *ResourceInstanceChange) Reset() { *m = ResourceInstanceChange{} } -func (m *ResourceInstanceChange) String() string { return proto.CompactTextString(m) } -func (*ResourceInstanceChange) ProtoMessage() {} +func (x *ResourceInstanceChange) Reset() { + *x = ResourceInstanceChange{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceInstanceChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceInstanceChange) ProtoMessage() {} + +func (x *ResourceInstanceChange) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceInstanceChange.ProtoReflect.Descriptor instead. 
func (*ResourceInstanceChange) Descriptor() ([]byte, []int) { - return fileDescriptor_02431083a6706c5b, []int{3} + return file_planfile_proto_rawDescGZIP(), []int{3} } -func (m *ResourceInstanceChange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResourceInstanceChange.Unmarshal(m, b) -} -func (m *ResourceInstanceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResourceInstanceChange.Marshal(b, m, deterministic) -} -func (m *ResourceInstanceChange) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceInstanceChange.Merge(m, src) -} -func (m *ResourceInstanceChange) XXX_Size() int { - return xxx_messageInfo_ResourceInstanceChange.Size(m) -} -func (m *ResourceInstanceChange) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceInstanceChange.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceInstanceChange proto.InternalMessageInfo - -func (m *ResourceInstanceChange) GetModulePath() string { - if m != nil { - return m.ModulePath +func (x *ResourceInstanceChange) GetModulePath() string { + if x != nil { + return x.ModulePath } return "" } -func (m *ResourceInstanceChange) GetMode() ResourceInstanceChange_ResourceMode { - if m != nil { - return m.Mode +func (x *ResourceInstanceChange) GetMode() ResourceInstanceChange_ResourceMode { + if x != nil { + return x.Mode } return ResourceInstanceChange_managed } -func (m *ResourceInstanceChange) GetType() string { - if m != nil { - return m.Type +func (x *ResourceInstanceChange) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *ResourceInstanceChange) GetName() string { - if m != nil { - return m.Name +func (x *ResourceInstanceChange) GetName() string { + if x != nil { + return x.Name } return "" } +func (m *ResourceInstanceChange) GetInstanceKey() isResourceInstanceChange_InstanceKey { + if m != nil { + return m.InstanceKey + } + return nil +} + +func (x *ResourceInstanceChange) GetStr() string { + if x, ok := 
x.GetInstanceKey().(*ResourceInstanceChange_Str); ok { + return x.Str + } + return "" +} + +func (x *ResourceInstanceChange) GetInt() int64 { + if x, ok := x.GetInstanceKey().(*ResourceInstanceChange_Int); ok { + return x.Int + } + return 0 +} + +func (x *ResourceInstanceChange) GetDeposedKey() string { + if x != nil { + return x.DeposedKey + } + return "" +} + +func (x *ResourceInstanceChange) GetProvider() string { + if x != nil { + return x.Provider + } + return "" +} + +func (x *ResourceInstanceChange) GetChange() *Change { + if x != nil { + return x.Change + } + return nil +} + +func (x *ResourceInstanceChange) GetPrivate() []byte { + if x != nil { + return x.Private + } + return nil +} + +func (x *ResourceInstanceChange) GetRequiredReplace() []*Path { + if x != nil { + return x.RequiredReplace + } + return nil +} + type isResourceInstanceChange_InstanceKey interface { isResourceInstanceChange_InstanceKey() } @@ -441,71 +576,11 @@ func (*ResourceInstanceChange_Str) isResourceInstanceChange_InstanceKey() {} func (*ResourceInstanceChange_Int) isResourceInstanceChange_InstanceKey() {} -func (m *ResourceInstanceChange) GetInstanceKey() isResourceInstanceChange_InstanceKey { - if m != nil { - return m.InstanceKey - } - return nil -} - -func (m *ResourceInstanceChange) GetStr() string { - if x, ok := m.GetInstanceKey().(*ResourceInstanceChange_Str); ok { - return x.Str - } - return "" -} - -func (m *ResourceInstanceChange) GetInt() int64 { - if x, ok := m.GetInstanceKey().(*ResourceInstanceChange_Int); ok { - return x.Int - } - return 0 -} - -func (m *ResourceInstanceChange) GetDeposedKey() string { - if m != nil { - return m.DeposedKey - } - return "" -} - -func (m *ResourceInstanceChange) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *ResourceInstanceChange) GetChange() *Change { - if m != nil { - return m.Change - } - return nil -} - -func (m *ResourceInstanceChange) GetPrivate() []byte { - if m != nil { - return 
m.Private - } - return nil -} - -func (m *ResourceInstanceChange) GetRequiredReplace() []*Path { - if m != nil { - return m.RequiredReplace - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*ResourceInstanceChange) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ResourceInstanceChange_Str)(nil), - (*ResourceInstanceChange_Int)(nil), - } -} - type OutputChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Name of the output as defined in the root module. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Description of the proposed change. May use "no-op", "create", @@ -514,54 +589,58 @@ type OutputChange struct { // Sensitive, if true, indicates that one or more of the values given // in "change" is sensitive and should not be shown directly in any // rendered plan. - Sensitive bool `protobuf:"varint,3,opt,name=sensitive,proto3" json:"sensitive,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Sensitive bool `protobuf:"varint,3,opt,name=sensitive,proto3" json:"sensitive,omitempty"` } -func (m *OutputChange) Reset() { *m = OutputChange{} } -func (m *OutputChange) String() string { return proto.CompactTextString(m) } -func (*OutputChange) ProtoMessage() {} +func (x *OutputChange) Reset() { + *x = OutputChange{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OutputChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutputChange) ProtoMessage() {} + +func (x *OutputChange) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { 
+ ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OutputChange.ProtoReflect.Descriptor instead. func (*OutputChange) Descriptor() ([]byte, []int) { - return fileDescriptor_02431083a6706c5b, []int{4} + return file_planfile_proto_rawDescGZIP(), []int{4} } -func (m *OutputChange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OutputChange.Unmarshal(m, b) -} -func (m *OutputChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OutputChange.Marshal(b, m, deterministic) -} -func (m *OutputChange) XXX_Merge(src proto.Message) { - xxx_messageInfo_OutputChange.Merge(m, src) -} -func (m *OutputChange) XXX_Size() int { - return xxx_messageInfo_OutputChange.Size(m) -} -func (m *OutputChange) XXX_DiscardUnknown() { - xxx_messageInfo_OutputChange.DiscardUnknown(m) -} - -var xxx_messageInfo_OutputChange proto.InternalMessageInfo - -func (m *OutputChange) GetName() string { - if m != nil { - return m.Name +func (x *OutputChange) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *OutputChange) GetChange() *Change { - if m != nil { - return m.Change +func (x *OutputChange) GetChange() *Change { + if x != nil { + return x.Change } return nil } -func (m *OutputChange) GetSensitive() bool { - if m != nil { - return m.Sensitive +func (x *OutputChange) GetSensitive() bool { + if x != nil { + return x.Sensitive } return false } @@ -579,40 +658,48 @@ func (m *OutputChange) GetSensitive() bool { // attribute is present. The top-level format version will not be incremented // for changes to the set of dynamic serialization formats. 
type DynamicValue struct { - Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"` } -func (m *DynamicValue) Reset() { *m = DynamicValue{} } -func (m *DynamicValue) String() string { return proto.CompactTextString(m) } -func (*DynamicValue) ProtoMessage() {} +func (x *DynamicValue) Reset() { + *x = DynamicValue{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicValue) ProtoMessage() {} + +func (x *DynamicValue) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicValue.ProtoReflect.Descriptor instead. 
func (*DynamicValue) Descriptor() ([]byte, []int) { - return fileDescriptor_02431083a6706c5b, []int{5} + return file_planfile_proto_rawDescGZIP(), []int{5} } -func (m *DynamicValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DynamicValue.Unmarshal(m, b) -} -func (m *DynamicValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DynamicValue.Marshal(b, m, deterministic) -} -func (m *DynamicValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_DynamicValue.Merge(m, src) -} -func (m *DynamicValue) XXX_Size() int { - return xxx_messageInfo_DynamicValue.Size(m) -} -func (m *DynamicValue) XXX_DiscardUnknown() { - xxx_messageInfo_DynamicValue.DiscardUnknown(m) -} - -var xxx_messageInfo_DynamicValue proto.InternalMessageInfo - -func (m *DynamicValue) GetMsgpack() []byte { - if m != nil { - return m.Msgpack +func (x *DynamicValue) GetMsgpack() []byte { + if x != nil { + return x.Msgpack } return nil } @@ -626,40 +713,48 @@ func (m *DynamicValue) GetMsgpack() []byte { // top-level format version will not be incremented for changes to the set of // hash algorithms. 
type Hash struct { - Sha256 []byte `protobuf:"bytes,1,opt,name=sha256,proto3" json:"sha256,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sha256 []byte `protobuf:"bytes,1,opt,name=sha256,proto3" json:"sha256,omitempty"` } -func (m *Hash) Reset() { *m = Hash{} } -func (m *Hash) String() string { return proto.CompactTextString(m) } -func (*Hash) ProtoMessage() {} +func (x *Hash) Reset() { + *x = Hash{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Hash) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Hash) ProtoMessage() {} + +func (x *Hash) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Hash.ProtoReflect.Descriptor instead. 
func (*Hash) Descriptor() ([]byte, []int) { - return fileDescriptor_02431083a6706c5b, []int{6} + return file_planfile_proto_rawDescGZIP(), []int{6} } -func (m *Hash) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Hash.Unmarshal(m, b) -} -func (m *Hash) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Hash.Marshal(b, m, deterministic) -} -func (m *Hash) XXX_Merge(src proto.Message) { - xxx_messageInfo_Hash.Merge(m, src) -} -func (m *Hash) XXX_Size() int { - return xxx_messageInfo_Hash.Size(m) -} -func (m *Hash) XXX_DiscardUnknown() { - xxx_messageInfo_Hash.DiscardUnknown(m) -} - -var xxx_messageInfo_Hash proto.InternalMessageInfo - -func (m *Hash) GetSha256() []byte { - if m != nil { - return m.Sha256 +func (x *Hash) GetSha256() []byte { + if x != nil { + return x.Sha256 } return nil } @@ -668,95 +763,95 @@ func (m *Hash) GetSha256() []byte { // used to refer to a sub-structure within a dynamic data structure presented // separately. type Path struct { - Steps []*Path_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Steps []*Path_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"` } -func (m *Path) Reset() { *m = Path{} } -func (m *Path) String() string { return proto.CompactTextString(m) } -func (*Path) ProtoMessage() {} +func (x *Path) Reset() { + *x = Path{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Path) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Path) ProtoMessage() {} + +func (x *Path) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != 
nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Path.ProtoReflect.Descriptor instead. func (*Path) Descriptor() ([]byte, []int) { - return fileDescriptor_02431083a6706c5b, []int{7} + return file_planfile_proto_rawDescGZIP(), []int{7} } -func (m *Path) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Path.Unmarshal(m, b) -} -func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Path.Marshal(b, m, deterministic) -} -func (m *Path) XXX_Merge(src proto.Message) { - xxx_messageInfo_Path.Merge(m, src) -} -func (m *Path) XXX_Size() int { - return xxx_messageInfo_Path.Size(m) -} -func (m *Path) XXX_DiscardUnknown() { - xxx_messageInfo_Path.DiscardUnknown(m) -} - -var xxx_messageInfo_Path proto.InternalMessageInfo - -func (m *Path) GetSteps() []*Path_Step { - if m != nil { - return m.Steps +func (x *Path) GetSteps() []*Path_Step { + if x != nil { + return x.Steps } return nil } type Path_Step struct { - // Types that are valid to be assigned to Selector: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Selector: // *Path_Step_AttributeName // *Path_Step_ElementKey - Selector isPath_Step_Selector `protobuf_oneof:"selector"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Selector isPath_Step_Selector `protobuf_oneof:"selector"` } -func (m *Path_Step) Reset() { *m = Path_Step{} } -func (m *Path_Step) String() string { return proto.CompactTextString(m) } -func (*Path_Step) ProtoMessage() {} +func (x *Path_Step) Reset() { + *x = Path_Step{} + if protoimpl.UnsafeEnabled { + mi := &file_planfile_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Path_Step) 
String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Path_Step) ProtoMessage() {} + +func (x *Path_Step) ProtoReflect() protoreflect.Message { + mi := &file_planfile_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Path_Step.ProtoReflect.Descriptor instead. func (*Path_Step) Descriptor() ([]byte, []int) { - return fileDescriptor_02431083a6706c5b, []int{7, 0} + return file_planfile_proto_rawDescGZIP(), []int{7, 0} } -func (m *Path_Step) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Path_Step.Unmarshal(m, b) -} -func (m *Path_Step) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Path_Step.Marshal(b, m, deterministic) -} -func (m *Path_Step) XXX_Merge(src proto.Message) { - xxx_messageInfo_Path_Step.Merge(m, src) -} -func (m *Path_Step) XXX_Size() int { - return xxx_messageInfo_Path_Step.Size(m) -} -func (m *Path_Step) XXX_DiscardUnknown() { - xxx_messageInfo_Path_Step.DiscardUnknown(m) -} - -var xxx_messageInfo_Path_Step proto.InternalMessageInfo - -type isPath_Step_Selector interface { - isPath_Step_Selector() -} - -type Path_Step_AttributeName struct { - AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` -} - -type Path_Step_ElementKey struct { - ElementKey *DynamicValue `protobuf:"bytes,2,opt,name=element_key,json=elementKey,proto3,oneof"` -} - -func (*Path_Step_AttributeName) isPath_Step_Selector() {} - -func (*Path_Step_ElementKey) isPath_Step_Selector() {} - func (m *Path_Step) GetSelector() isPath_Step_Selector { if m != nil { return m.Selector @@ -764,102 +859,351 @@ func (m *Path_Step) GetSelector() isPath_Step_Selector { return nil } -func (m *Path_Step) GetAttributeName() string { - if x, ok := m.GetSelector().(*Path_Step_AttributeName); ok { +func 
(x *Path_Step) GetAttributeName() string { + if x, ok := x.GetSelector().(*Path_Step_AttributeName); ok { return x.AttributeName } return "" } -func (m *Path_Step) GetElementKey() *DynamicValue { - if x, ok := m.GetSelector().(*Path_Step_ElementKey); ok { +func (x *Path_Step) GetElementKey() *DynamicValue { + if x, ok := x.GetSelector().(*Path_Step_ElementKey); ok { return x.ElementKey } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Path_Step) XXX_OneofWrappers() []interface{} { - return []interface{}{ +type isPath_Step_Selector interface { + isPath_Step_Selector() +} + +type Path_Step_AttributeName struct { + // Set "attribute_name" to represent looking up an attribute + // in the current object value. + AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` +} + +type Path_Step_ElementKey struct { + // Set "element_key" to represent looking up an element in + // an indexable collection type. + ElementKey *DynamicValue `protobuf:"bytes,2,opt,name=element_key,json=elementKey,proto3,oneof"` +} + +func (*Path_Step_AttributeName) isPath_Step_Selector() {} + +func (*Path_Step_ElementKey) isPath_Step_Selector() {} + +var File_planfile_proto protoreflect.FileDescriptor + +var file_planfile_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x06, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x22, 0xce, 0x04, 0x0a, 0x04, 0x50, 0x6c, 0x61, + 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x09, 0x76, + 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x2e, 0x56, 0x61, 0x72, + 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x76, 0x61, 0x72, 
+ 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x73, 0x12, 0x3b, 0x0a, 0x0e, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x61, 0x6e, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, + 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, + 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, + 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x49, + 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, + 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, + 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x48, 0x61, + 0x73, 0x68, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x07, 0x62, 0x61, 0x63, + 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x61, 0x6e, 0x2e, 0x42, 0x61, 
0x63, 0x6b, 0x65, 0x6e, 0x64, 0x52, 0x07, 0x62, 0x61, 0x63, + 0x6b, 0x65, 0x6e, 0x64, 0x1a, 0x52, 0x0a, 0x0e, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, + 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4f, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x22, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x69, 0x0a, 0x07, 0x42, 0x61, 0x63, + 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, + 0x6e, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1c, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x22, 0x5e, 0x0a, 0x06, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x26, + 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x41, 0x63, 0x74, 0x69, 
0x6f, 0x6e, 0x52, 0x06, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x22, 0xb9, 0x03, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, + 0x12, 0x3f, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x73, 0x74, 0x72, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x73, 0x74, 0x72, 0x12, 0x12, 0x0a, + 0x03, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x03, 0x69, 0x6e, + 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x4b, + 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x26, + 0x0a, 0x06, 
0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x06, + 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x12, 0x37, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x70, + 0x6c, 0x61, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x61, 0x6e, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0c, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x64, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x10, 0x01, + 0x42, 0x0e, 0x0a, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, + 0x22, 0x68, 0x0a, 0x0c, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x52, 0x06, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, + 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x22, 0x28, 0x0a, 0x0c, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x73, + 0x67, 0x70, 0x61, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x73, 0x67, + 0x70, 0x61, 0x63, 0x6b, 0x22, 0x1e, 0x0a, 0x04, 
0x48, 0x61, 0x73, 0x68, 0x12, 0x16, 0x0a, 0x06, + 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x68, + 0x61, 0x32, 0x35, 0x36, 0x22, 0xa5, 0x01, 0x0a, 0x04, 0x50, 0x61, 0x74, 0x68, 0x12, 0x27, 0x0a, + 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x52, + 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x74, 0x0a, 0x04, 0x53, 0x74, 0x65, 0x70, 0x12, 0x27, + 0x0a, 0x0e, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, + 0x42, 0x0a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2a, 0x70, 0x0a, 0x06, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4f, 0x50, 0x10, 0x00, + 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x52, 0x45, 0x41, 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, + 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x05, 0x12, 0x16, + 0x0a, 0x12, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x54, 0x48, 0x45, 0x4e, 0x5f, 0x43, 0x52, + 0x45, 0x41, 0x54, 0x45, 0x10, 0x06, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, + 0x5f, 0x54, 0x48, 0x45, 0x4e, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x07, 0x42, 0x39, + 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 
0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, + 0x2f, 0x70, 0x6c, 0x61, 0x6e, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, + 0x70, 0x6c, 0x61, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_planfile_proto_rawDescOnce sync.Once + file_planfile_proto_rawDescData = file_planfile_proto_rawDesc +) + +func file_planfile_proto_rawDescGZIP() []byte { + file_planfile_proto_rawDescOnce.Do(func() { + file_planfile_proto_rawDescData = protoimpl.X.CompressGZIP(file_planfile_proto_rawDescData) + }) + return file_planfile_proto_rawDescData +} + +var file_planfile_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_planfile_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_planfile_proto_goTypes = []interface{}{ + (Action)(0), // 0: tfplan.Action + (ResourceInstanceChange_ResourceMode)(0), // 1: tfplan.ResourceInstanceChange.ResourceMode + (*Plan)(nil), // 2: tfplan.Plan + (*Backend)(nil), // 3: tfplan.Backend + (*Change)(nil), // 4: tfplan.Change + (*ResourceInstanceChange)(nil), // 5: tfplan.ResourceInstanceChange + (*OutputChange)(nil), // 6: tfplan.OutputChange + (*DynamicValue)(nil), // 7: tfplan.DynamicValue + (*Hash)(nil), // 8: tfplan.Hash + (*Path)(nil), // 9: tfplan.Path + nil, // 10: tfplan.Plan.VariablesEntry + nil, // 11: tfplan.Plan.ProviderHashesEntry + (*Path_Step)(nil), // 12: tfplan.Path.Step +} +var file_planfile_proto_depIdxs = []int32{ + 10, // 0: tfplan.Plan.variables:type_name -> tfplan.Plan.VariablesEntry + 5, // 1: tfplan.Plan.resource_changes:type_name -> tfplan.ResourceInstanceChange + 6, // 2: tfplan.Plan.output_changes:type_name -> tfplan.OutputChange + 11, // 3: tfplan.Plan.provider_hashes:type_name -> tfplan.Plan.ProviderHashesEntry + 3, // 4: tfplan.Plan.backend:type_name -> tfplan.Backend + 7, // 5: tfplan.Backend.config:type_name -> tfplan.DynamicValue + 0, // 6: 
tfplan.Change.action:type_name -> tfplan.Action + 7, // 7: tfplan.Change.values:type_name -> tfplan.DynamicValue + 1, // 8: tfplan.ResourceInstanceChange.mode:type_name -> tfplan.ResourceInstanceChange.ResourceMode + 4, // 9: tfplan.ResourceInstanceChange.change:type_name -> tfplan.Change + 9, // 10: tfplan.ResourceInstanceChange.required_replace:type_name -> tfplan.Path + 4, // 11: tfplan.OutputChange.change:type_name -> tfplan.Change + 12, // 12: tfplan.Path.steps:type_name -> tfplan.Path.Step + 7, // 13: tfplan.Plan.VariablesEntry.value:type_name -> tfplan.DynamicValue + 8, // 14: tfplan.Plan.ProviderHashesEntry.value:type_name -> tfplan.Hash + 7, // 15: tfplan.Path.Step.element_key:type_name -> tfplan.DynamicValue + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name +} + +func init() { file_planfile_proto_init() } +func file_planfile_proto_init() { + if File_planfile_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_planfile_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Plan); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Backend); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Change); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*ResourceInstanceChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OutputChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Hash); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Path); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_planfile_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Path_Step); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_planfile_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*ResourceInstanceChange_Str)(nil), + (*ResourceInstanceChange_Int)(nil), + } + file_planfile_proto_msgTypes[10].OneofWrappers = []interface{}{ (*Path_Step_AttributeName)(nil), (*Path_Step_ElementKey)(nil), } -} - -func init() { - proto.RegisterEnum("tfplan.Action", Action_name, Action_value) - proto.RegisterEnum("tfplan.ResourceInstanceChange_ResourceMode", ResourceInstanceChange_ResourceMode_name, ResourceInstanceChange_ResourceMode_value) - proto.RegisterType((*Plan)(nil), "tfplan.Plan") - 
proto.RegisterMapType((map[string]*Hash)(nil), "tfplan.Plan.ProviderHashesEntry") - proto.RegisterMapType((map[string]*DynamicValue)(nil), "tfplan.Plan.VariablesEntry") - proto.RegisterType((*Backend)(nil), "tfplan.Backend") - proto.RegisterType((*Change)(nil), "tfplan.Change") - proto.RegisterType((*ResourceInstanceChange)(nil), "tfplan.ResourceInstanceChange") - proto.RegisterType((*OutputChange)(nil), "tfplan.OutputChange") - proto.RegisterType((*DynamicValue)(nil), "tfplan.DynamicValue") - proto.RegisterType((*Hash)(nil), "tfplan.Hash") - proto.RegisterType((*Path)(nil), "tfplan.Path") - proto.RegisterType((*Path_Step)(nil), "tfplan.Path.Step") -} - -func init() { proto.RegisterFile("planfile.proto", fileDescriptor_02431083a6706c5b) } - -var fileDescriptor_02431083a6706c5b = []byte{ - // 893 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x55, 0xe1, 0x6e, 0xe3, 0x44, - 0x10, 0xae, 0x63, 0xc7, 0x49, 0x26, 0xa9, 0x9b, 0x5b, 0x50, 0x65, 0x95, 0xd3, 0x11, 0x2c, 0xc1, - 0x85, 0x3b, 0x94, 0x4a, 0x41, 0x50, 0x0e, 0x7e, 0xa0, 0xf6, 0x1a, 0x29, 0xd5, 0x41, 0x1b, 0x2d, - 0xa5, 0x3f, 0xf8, 0x81, 0xb5, 0xb1, 0xa7, 0x89, 0x55, 0xc7, 0x36, 0xbb, 0x9b, 0xa0, 0x3c, 0x10, - 0x0f, 0xc1, 0x4b, 0xf0, 0x4c, 0x68, 0x77, 0x6d, 0x27, 0x95, 0x7a, 0xfd, 0x95, 0x9d, 0x6f, 0x66, - 0x3e, 0xcf, 0x7e, 0x33, 0xb3, 0x01, 0xaf, 0x48, 0x59, 0x76, 0x9f, 0xa4, 0x38, 0x2a, 0x78, 0x2e, - 0x73, 0xe2, 0xca, 0x7b, 0x85, 0x04, 0xff, 0x39, 0xe0, 0xcc, 0x52, 0x96, 0x11, 0x1f, 0x5a, 0x1b, - 0xe4, 0x22, 0xc9, 0x33, 0xdf, 0x1a, 0x58, 0x43, 0x87, 0x56, 0x26, 0x79, 0x07, 0x9d, 0x0d, 0xe3, - 0x09, 0x9b, 0xa7, 0x28, 0xfc, 0xc6, 0xc0, 0x1e, 0x76, 0xc7, 0x9f, 0x8d, 0x4c, 0xfa, 0x48, 0xa5, - 0x8e, 0xee, 0x2a, 0xef, 0x24, 0x93, 0x7c, 0x4b, 0x77, 0xd1, 0xe4, 0x0a, 0xfa, 0x1c, 0x45, 0xbe, - 0xe6, 0x11, 0x86, 0xd1, 0x92, 0x65, 0x0b, 0x14, 0xbe, 0xad, 0x19, 0x5e, 0x55, 0x0c, 0xb4, 0xf4, - 0x5f, 0x65, 0x42, 0xb2, 0x2c, 0xc2, 0xf7, 0x3a, 0x8c, 0x1e, 0x55, 0x79, 
0xc6, 0x16, 0xe4, 0x27, - 0xf0, 0xf2, 0xb5, 0x2c, 0xd6, 0xb2, 0x26, 0x72, 0x34, 0xd1, 0xa7, 0x15, 0xd1, 0x8d, 0xf6, 0x96, - 0xe9, 0x87, 0xf9, 0x9e, 0x25, 0xc8, 0x17, 0xd0, 0x93, 0x8c, 0x2f, 0x50, 0x86, 0x2c, 0x8e, 0xb9, - 0xf0, 0x9b, 0x03, 0x7b, 0xd8, 0xa1, 0x5d, 0x83, 0x9d, 0x2b, 0x88, 0xbc, 0x85, 0x17, 0x12, 0x39, - 0x67, 0xf7, 0x39, 0x5f, 0x85, 0x95, 0x12, 0xde, 0xc0, 0x1a, 0x76, 0x68, 0xbf, 0x76, 0xdc, 0x95, - 0x92, 0x5c, 0xc1, 0x51, 0xc1, 0xf3, 0x4d, 0x12, 0x23, 0x0f, 0x97, 0x4c, 0x2c, 0x51, 0xf8, 0x47, - 0xba, 0x9a, 0xc1, 0x23, 0x61, 0x66, 0x65, 0xcc, 0x54, 0x87, 0x18, 0x75, 0xbc, 0xe2, 0x11, 0x48, - 0xbe, 0x86, 0xd6, 0x9c, 0x45, 0x0f, 0x98, 0xc5, 0xfe, 0xe1, 0xc0, 0x1a, 0x76, 0xc7, 0x47, 0x15, - 0xc5, 0x85, 0x81, 0x69, 0xe5, 0x3f, 0xa1, 0xe0, 0x3d, 0x96, 0x9a, 0xf4, 0xc1, 0x7e, 0xc0, 0xad, - 0x6e, 0x58, 0x87, 0xaa, 0x23, 0x79, 0x03, 0xcd, 0x0d, 0x4b, 0xd7, 0xe8, 0x37, 0x34, 0x59, 0xad, - 0xce, 0xe5, 0x36, 0x63, 0xab, 0x24, 0xba, 0x53, 0x3e, 0x6a, 0x42, 0x7e, 0x6c, 0xfc, 0x60, 0x9d, - 0xdc, 0xc0, 0x27, 0x4f, 0x54, 0xf9, 0x04, 0x71, 0xf0, 0x98, 0xb8, 0x57, 0x11, 0xab, 0xac, 0x3d, - 0xc2, 0x20, 0x81, 0x56, 0x59, 0x38, 0x21, 0xe0, 0xc8, 0x6d, 0x81, 0x25, 0x8b, 0x3e, 0x93, 0x6f, - 0xc0, 0x8d, 0xf2, 0xec, 0x3e, 0x59, 0x3c, 0x5b, 0x60, 0x19, 0x43, 0x5e, 0x42, 0xe7, 0xef, 0x9c, - 0x3f, 0x88, 0x82, 0x45, 0xe8, 0xdb, 0x9a, 0x66, 0x07, 0x04, 0x7f, 0x82, 0x6b, 0x1a, 0x4c, 0xbe, - 0x02, 0x97, 0x45, 0xb2, 0x9a, 0x5d, 0x6f, 0xec, 0x55, 0xac, 0xe7, 0x1a, 0xa5, 0xa5, 0x57, 0x7d, - 0x5d, 0x57, 0x5a, 0xcd, 0xf1, 0x47, 0xbe, 0x6e, 0x62, 0x82, 0x7f, 0x6d, 0x38, 0x7e, 0x7a, 0x3c, - 0xc9, 0xe7, 0xd0, 0x5d, 0xe5, 0xf1, 0x3a, 0xc5, 0xb0, 0x60, 0x72, 0x59, 0xde, 0x10, 0x0c, 0x34, - 0x63, 0x72, 0x49, 0x7e, 0x06, 0x67, 0x95, 0xc7, 0x46, 0x2d, 0x6f, 0xfc, 0xf6, 0xf9, 0x69, 0xaf, - 0xe1, 0x5f, 0xf3, 0x18, 0xa9, 0x4e, 0xac, 0xc5, 0xb3, 0xf7, 0xc4, 0x23, 0xe0, 0x64, 0x6c, 0x85, - 0xbe, 0x63, 0x30, 0x75, 0x26, 0x04, 0x6c, 0x21, 0xb9, 0xdf, 0x54, 0xd0, 0xf4, 0x80, 0x2a, 0x43, - 0x61, 0x49, 
0x26, 0x7d, 0x77, 0x60, 0x0d, 0x6d, 0x85, 0x25, 0x99, 0x54, 0x15, 0xc7, 0x58, 0xe4, - 0x02, 0xe3, 0x50, 0x75, 0xb6, 0x65, 0x2a, 0x2e, 0xa1, 0x0f, 0xb8, 0x25, 0x27, 0xd0, 0xae, 0x46, - 0xd3, 0x6f, 0x6b, 0x6f, 0x6d, 0x2b, 0x7d, 0xcd, 0xd6, 0xf9, 0x1d, 0xdd, 0xb5, 0x5a, 0xdf, 0x72, - 0xdd, 0x4a, 0xaf, 0x7a, 0x44, 0x0a, 0x9e, 0x6c, 0x98, 0x44, 0x1f, 0x06, 0xd6, 0xb0, 0x47, 0x2b, - 0x93, 0x9c, 0xa9, 0x97, 0xe0, 0xaf, 0x75, 0xc2, 0x31, 0x0e, 0x39, 0x16, 0xa9, 0x6a, 0x68, 0x57, - 0xf7, 0xa0, 0x9e, 0x24, 0xa5, 0x9b, 0xda, 0x7b, 0x13, 0x45, 0x4d, 0x50, 0xf0, 0x25, 0xf4, 0xf6, - 0xd5, 0x21, 0x5d, 0x68, 0xad, 0x58, 0xc6, 0x16, 0x18, 0xf7, 0x0f, 0x48, 0x1b, 0x9c, 0x98, 0x49, - 0xd6, 0xb7, 0x2e, 0x3c, 0xe8, 0x25, 0xa5, 0xa6, 0xea, 0x7e, 0xc1, 0x12, 0x7a, 0xfb, 0x0f, 0x42, - 0x2d, 0x9d, 0xb5, 0x27, 0xdd, 0xee, 0x56, 0x8d, 0x67, 0x6f, 0xf5, 0x12, 0x3a, 0x02, 0x33, 0x91, - 0xc8, 0x64, 0x63, 0xfa, 0xd1, 0xa6, 0x3b, 0x20, 0x18, 0x42, 0x6f, 0x7f, 0x7a, 0x94, 0x06, 0x2b, - 0xb1, 0x28, 0x58, 0xf4, 0xa0, 0x3f, 0xd6, 0xa3, 0x95, 0x19, 0xbc, 0x02, 0x47, 0x6d, 0x0b, 0x39, - 0x06, 0x57, 0x2c, 0xd9, 0xf8, 0xbb, 0xef, 0xcb, 0x80, 0xd2, 0x0a, 0xfe, 0xb1, 0xc0, 0xd1, 0xc3, - 0xf3, 0x1a, 0x9a, 0x42, 0x62, 0x21, 0x7c, 0x4b, 0x2b, 0xf4, 0x62, 0x5f, 0xa1, 0xd1, 0x6f, 0x12, - 0x0b, 0x6a, 0xfc, 0x27, 0x12, 0x1c, 0x65, 0x92, 0xd7, 0xe0, 0x31, 0x29, 0x79, 0x32, 0x5f, 0x4b, - 0x0c, 0x77, 0xf7, 0x9c, 0x1e, 0xd0, 0xc3, 0x1a, 0xbf, 0x56, 0x57, 0x3e, 0x83, 0x2e, 0xa6, 0xb8, - 0xc2, 0x4c, 0xea, 0x29, 0x78, 0x66, 0x07, 0xa7, 0x07, 0x14, 0xca, 0xd0, 0x0f, 0xb8, 0xbd, 0x00, - 0x68, 0x0b, 0x4c, 0x31, 0x92, 0x39, 0x7f, 0x53, 0x80, 0x6b, 0xf6, 0x4a, 0xe9, 0x7f, 0x7d, 0x73, - 0x33, 0xeb, 0x1f, 0x10, 0x00, 0xf7, 0x3d, 0x9d, 0x9c, 0xdf, 0x4e, 0xfa, 0x96, 0x42, 0xe9, 0xe4, - 0xfc, 0xb2, 0xdf, 0x50, 0xe8, 0xef, 0xb3, 0x4b, 0x85, 0xda, 0xea, 0x7c, 0x39, 0xf9, 0x65, 0x72, - 0x3b, 0xe9, 0x37, 0xc9, 0x31, 0x10, 0x73, 0x0e, 0x6f, 0xa7, 0x93, 0xeb, 0xb0, 0xcc, 0x74, 0x15, - 0x6e, 0xce, 0x06, 0x2f, 0xe3, 0x5b, 0x17, 0xef, 
0xfe, 0x38, 0x5b, 0x24, 0x72, 0xb9, 0x9e, 0x8f, - 0xa2, 0x7c, 0x75, 0xaa, 0x5e, 0xdc, 0x24, 0xca, 0x79, 0x71, 0x5a, 0x3f, 0xcc, 0xa7, 0xaa, 0x7e, - 0x71, 0x9a, 0x64, 0x12, 0x79, 0xc6, 0x52, 0x6d, 0xea, 0x3f, 0xba, 0xb9, 0xab, 0x7f, 0xbe, 0xfd, - 0x3f, 0x00, 0x00, 0xff, 0xff, 0x30, 0x3e, 0x4e, 0x33, 0x01, 0x07, 0x00, 0x00, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_planfile_proto_rawDesc, + NumEnums: 2, + NumMessages: 11, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_planfile_proto_goTypes, + DependencyIndexes: file_planfile_proto_depIdxs, + EnumInfos: file_planfile_proto_enumTypes, + MessageInfos: file_planfile_proto_msgTypes, + }.Build() + File_planfile_proto = out.File + file_planfile_proto_rawDesc = nil + file_planfile_proto_goTypes = nil + file_planfile_proto_depIdxs = nil } diff --git a/plans/objchange/compatible.go b/plans/objchange/compatible.go index 6d92fca4d..efb4023d2 100644 --- a/plans/objchange/compatible.go +++ b/plans/objchange/compatible.go @@ -357,6 +357,10 @@ func couldHaveUnknownBlockPlaceholder(v cty.Value, blockS *configschema.NestedBl return false // treated as if the list were empty, so we would see zero iterations below } + // Unmark before we call ElementIterator in case this iterable is marked sensitive. + // This can arise in the case where a member of a Set is sensitive, and thus the + // whole Set is marked sensitive + v, _ := v.Unmark() // For all other nesting modes, our value should be something iterable. 
for it := v.ElementIterator(); it.Next(); { _, ev := it.Element() diff --git a/plans/objchange/compatible_test.go b/plans/objchange/compatible_test.go index 9a6924441..ac3793674 100644 --- a/plans/objchange/compatible_test.go +++ b/plans/objchange/compatible_test.go @@ -194,6 +194,49 @@ func TestAssertObjectCompatible(t *testing.T) { `.name: inconsistent values for sensitive attribute`, }, }, + { + // This tests the codepath that leads to couldHaveUnknownBlockPlaceholder, + // where a set may be sensitive and need to be unmarked before it + // is iterated upon + &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "configuration": { + Nesting: configschema.NestingList, + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "sensitive_fields": { + Nesting: configschema.NestingSet, + Block: schemaWithFoo, + }, + }, + }, + }, + }, + }, + cty.ObjectVal(map[string]cty.Value{ + "configuration": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "sensitive_fields": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("secret"), + }), + }).Mark("sensitive"), + }), + }), + }), + cty.ObjectVal(map[string]cty.Value{ + "configuration": cty.TupleVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "sensitive_fields": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("secret"), + }), + }).Mark("sensitive"), + }), + }), + }), + nil, + }, { &configschema.Block{ Attributes: map[string]*configschema.Attribute{ diff --git a/plans/plan_test.go b/plans/plan_test.go index 012ff06a6..b8a0e4501 100644 --- a/plans/plan_test.go +++ b/plans/plan_test.go @@ -68,3 +68,28 @@ func TestProviderAddrs(t *testing.T) { t.Error(problem) } } + +// Module outputs should not effect the result of Empty +func TestModuleOutputChangesEmpty(t *testing.T) { + changes := &Changes{ + Outputs: []*OutputChangeSrc{ + { + Addr: addrs.AbsOutputValue{ + Module: 
addrs.RootModuleInstance.Child("child", addrs.NoKey), + OutputValue: addrs.OutputValue{ + Name: "output", + }, + }, + ChangeSrc: ChangeSrc{ + Action: Update, + Before: []byte("a"), + After: []byte("b"), + }, + }, + }, + } + + if !changes.Empty() { + t.Fatal("plan has no visible changes") + } +} diff --git a/plans/planfile/tfplan.go b/plans/planfile/tfplan.go index e1deeb083..5162aae75 100644 --- a/plans/planfile/tfplan.go +++ b/plans/planfile/tfplan.go @@ -5,7 +5,7 @@ import ( "io" "io/ioutil" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/plans" diff --git a/plugin/convert/diagnostics.go b/plugin/convert/diagnostics.go index 51cb2fe2f..7eca32888 100644 --- a/plugin/convert/diagnostics.go +++ b/plugin/convert/diagnostics.go @@ -65,7 +65,7 @@ func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics { var newDiag tfdiags.Diagnostic // if there's an attribute path, we need to create a AttributeValue diagnostic - if d.Attribute != nil { + if d.Attribute != nil && len(d.Attribute.Steps) > 0 { path := AttributePathToPath(d.Attribute) newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path) } else { diff --git a/plugin/convert/diagnostics_test.go b/plugin/convert/diagnostics_test.go index 5825269a5..b8574dc4d 100644 --- a/plugin/convert/diagnostics_test.go +++ b/plugin/convert/diagnostics_test.go @@ -5,11 +5,19 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" proto "github.com/hashicorp/terraform/internal/tfplugin5" "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) +var ignoreUnexported = cmpopts.IgnoreUnexported( + proto.Diagnostic{}, + proto.Schema_Block{}, + proto.Schema_NestedBlock{}, + proto.Schema_Attribute{}, +) + func TestProtoDiagnostics(t *testing.T) { diags := WarnsAndErrsToProto( []string{ @@ -41,8 +49,8 @@ func TestProtoDiagnostics(t *testing.T) { }, } - if 
!cmp.Equal(expected, diags) { - t.Fatal(cmp.Diff(expected, diags)) + if !cmp.Equal(expected, diags, ignoreUnexported) { + t.Fatal(cmp.Diff(expected, diags, ignoreUnexported)) } } diff --git a/plugin/convert/schema_test.go b/plugin/convert/schema_test.go index 8ebf0fdd0..45e4d8eac 100644 --- a/plugin/convert/schema_test.go +++ b/plugin/convert/schema_test.go @@ -353,8 +353,8 @@ func TestConvertProtoSchemaBlocks(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { converted := ConfigSchemaToProto(tc.Block) - if !cmp.Equal(converted, tc.Want, typeComparer, equateEmpty) { - t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, equateEmpty)) + if !cmp.Equal(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported) { + t.Fatal(cmp.Diff(converted, tc.Want, typeComparer, equateEmpty, ignoreUnexported)) } }) } diff --git a/plugin/discovery/meta_set.go b/plugin/discovery/meta_set.go index 3a992892d..72e4ce20e 100644 --- a/plugin/discovery/meta_set.go +++ b/plugin/discovery/meta_set.go @@ -118,7 +118,7 @@ func (s PluginMetaSet) Newest() PluginMeta { panic(err) } - if first == true || version.NewerThan(winnerVersion) { + if first || version.NewerThan(winnerVersion) { winner = p winnerVersion = version first = false diff --git a/plugin/plugin.go b/plugin/plugin.go index e4fb57761..07a3000c0 100644 --- a/plugin/plugin.go +++ b/plugin/plugin.go @@ -4,8 +4,6 @@ import ( "github.com/hashicorp/go-plugin" ) -// See serve.go for serving plugins - var VersionedPlugins = map[int]plugin.PluginSet{ 5: { "provider": &GRPCProviderPlugin{}, diff --git a/plugin/plugin_test.go b/plugin/plugin_test.go deleted file mode 100644 index ddef40ab2..000000000 --- a/plugin/plugin_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package plugin - -import ( - "github.com/hashicorp/terraform/terraform" -) - -func testProviderFixed(p terraform.ResourceProvider) ProviderFunc { - return func() terraform.ResourceProvider { - return p - } -} - -func testProvisionerFixed(p 
terraform.ResourceProvisioner) ProvisionerFunc { - return func() terraform.ResourceProvisioner { - return p - } -} diff --git a/plugin/resource_provider.go b/plugin/resource_provider.go deleted file mode 100644 index a9d520581..000000000 --- a/plugin/resource_provider.go +++ /dev/null @@ -1,620 +0,0 @@ -package plugin - -import ( - "net/rpc" - - plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/terraform" -) - -// ResourceProviderPlugin is the plugin.Plugin implementation. -type ResourceProviderPlugin struct { - ResourceProvider func() terraform.ResourceProvider -} - -func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { - return &ResourceProviderServer{ - Broker: b, - Provider: p.ResourceProvider(), - }, nil -} - -func (p *ResourceProviderPlugin) Client( - b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { - return &ResourceProvider{Broker: b, Client: c}, nil -} - -// ResourceProvider is an implementation of terraform.ResourceProvider -// that communicates over RPC. 
-type ResourceProvider struct { - Broker *plugin.MuxBroker - Client *rpc.Client -} - -func (p *ResourceProvider) Stop() error { - var resp ResourceProviderStopResponse - err := p.Client.Call("Plugin.Stop", new(interface{}), &resp) - if err != nil { - return err - } - if resp.Error != nil { - err = resp.Error - } - - return err -} - -func (p *ResourceProvider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { - var result ResourceProviderGetSchemaResponse - args := &ResourceProviderGetSchemaArgs{ - Req: req, - } - - err := p.Client.Call("Plugin.GetSchema", args, &result) - if err != nil { - return nil, err - } - - if result.Error != nil { - err = result.Error - } - - return result.Schema, err -} - -func (p *ResourceProvider) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { - id := p.Broker.NextId() - go p.Broker.AcceptAndServe(id, &UIInputServer{ - UIInput: input, - }) - - var resp ResourceProviderInputResponse - args := ResourceProviderInputArgs{ - InputId: id, - Config: c, - } - - err := p.Client.Call("Plugin.Input", &args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - return nil, err - } - - return resp.Config, nil -} - -func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProviderValidateResponse - args := ResourceProviderValidateArgs{ - Config: c, - } - - err := p.Client.Call("Plugin.Validate", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvider) ValidateResource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProviderValidateResourceResponse - args := ResourceProviderValidateResourceArgs{ - Config: c, - Type: t, - } - - 
err := p.Client.Call("Plugin.ValidateResource", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error { - var resp ResourceProviderConfigureResponse - err := p.Client.Call("Plugin.Configure", c, &resp) - if err != nil { - return err - } - if resp.Error != nil { - err = resp.Error - } - - return err -} - -func (p *ResourceProvider) Apply( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - var resp ResourceProviderApplyResponse - args := &ResourceProviderApplyArgs{ - Info: info, - State: s, - Diff: d, - } - - err := p.Client.Call("Plugin.Apply", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) Diff( - info *terraform.InstanceInfo, - s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - var resp ResourceProviderDiffResponse - args := &ResourceProviderDiffArgs{ - Info: info, - State: s, - Config: c, - } - err := p.Client.Call("Plugin.Diff", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.Diff, err -} - -func (p *ResourceProvider) ValidateDataSource( - t string, c *terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProviderValidateResourceResponse - args := ResourceProviderValidateResourceArgs{ - Config: c, - Type: t, - } - - err := p.Client.Call("Plugin.ValidateDataSource", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return 
resp.Warnings, errs -} - -func (p *ResourceProvider) Refresh( - info *terraform.InstanceInfo, - s *terraform.InstanceState) (*terraform.InstanceState, error) { - var resp ResourceProviderRefreshResponse - args := &ResourceProviderRefreshArgs{ - Info: info, - State: s, - } - - err := p.Client.Call("Plugin.Refresh", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) ImportState( - info *terraform.InstanceInfo, - id string) ([]*terraform.InstanceState, error) { - var resp ResourceProviderImportStateResponse - args := &ResourceProviderImportStateArgs{ - Info: info, - Id: id, - } - - err := p.Client.Call("Plugin.ImportState", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) Resources() []terraform.ResourceType { - var result []terraform.ResourceType - - err := p.Client.Call("Plugin.Resources", new(interface{}), &result) - if err != nil { - // TODO: panic, log, what? 
- return nil - } - - return result -} - -func (p *ResourceProvider) ReadDataDiff( - info *terraform.InstanceInfo, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { - var resp ResourceProviderReadDataDiffResponse - args := &ResourceProviderReadDataDiffArgs{ - Info: info, - Config: c, - } - - err := p.Client.Call("Plugin.ReadDataDiff", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.Diff, err -} - -func (p *ResourceProvider) ReadDataApply( - info *terraform.InstanceInfo, - d *terraform.InstanceDiff) (*terraform.InstanceState, error) { - var resp ResourceProviderReadDataApplyResponse - args := &ResourceProviderReadDataApplyArgs{ - Info: info, - Diff: d, - } - - err := p.Client.Call("Plugin.ReadDataApply", args, &resp) - if err != nil { - return nil, err - } - if resp.Error != nil { - err = resp.Error - } - - return resp.State, err -} - -func (p *ResourceProvider) DataSources() []terraform.DataSource { - var result []terraform.DataSource - - err := p.Client.Call("Plugin.DataSources", new(interface{}), &result) - if err != nil { - // TODO: panic, log, what? - return nil - } - - return result -} - -func (p *ResourceProvider) Close() error { - return p.Client.Close() -} - -// ResourceProviderServer is a net/rpc compatible structure for serving -// a ResourceProvider. This should not be used directly. 
-type ResourceProviderServer struct { - Broker *plugin.MuxBroker - Provider terraform.ResourceProvider -} - -type ResourceProviderStopResponse struct { - Error *plugin.BasicError -} - -type ResourceProviderGetSchemaArgs struct { - Req *terraform.ProviderSchemaRequest -} - -type ResourceProviderGetSchemaResponse struct { - Schema *terraform.ProviderSchema - Error *plugin.BasicError -} - -type ResourceProviderConfigureResponse struct { - Error *plugin.BasicError -} - -type ResourceProviderInputArgs struct { - InputId uint32 - Config *terraform.ResourceConfig -} - -type ResourceProviderInputResponse struct { - Config *terraform.ResourceConfig - Error *plugin.BasicError -} - -type ResourceProviderApplyArgs struct { - Info *terraform.InstanceInfo - State *terraform.InstanceState - Diff *terraform.InstanceDiff -} - -type ResourceProviderApplyResponse struct { - State *terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderDiffArgs struct { - Info *terraform.InstanceInfo - State *terraform.InstanceState - Config *terraform.ResourceConfig -} - -type ResourceProviderDiffResponse struct { - Diff *terraform.InstanceDiff - Error *plugin.BasicError -} - -type ResourceProviderRefreshArgs struct { - Info *terraform.InstanceInfo - State *terraform.InstanceState -} - -type ResourceProviderRefreshResponse struct { - State *terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderImportStateArgs struct { - Info *terraform.InstanceInfo - Id string -} - -type ResourceProviderImportStateResponse struct { - State []*terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderReadDataApplyArgs struct { - Info *terraform.InstanceInfo - Diff *terraform.InstanceDiff -} - -type ResourceProviderReadDataApplyResponse struct { - State *terraform.InstanceState - Error *plugin.BasicError -} - -type ResourceProviderReadDataDiffArgs struct { - Info *terraform.InstanceInfo - Config *terraform.ResourceConfig -} - -type 
ResourceProviderReadDataDiffResponse struct { - Diff *terraform.InstanceDiff - Error *plugin.BasicError -} - -type ResourceProviderValidateArgs struct { - Config *terraform.ResourceConfig -} - -type ResourceProviderValidateResponse struct { - Warnings []string - Errors []*plugin.BasicError -} - -type ResourceProviderValidateResourceArgs struct { - Config *terraform.ResourceConfig - Type string -} - -type ResourceProviderValidateResourceResponse struct { - Warnings []string - Errors []*plugin.BasicError -} - -func (s *ResourceProviderServer) Stop( - _ interface{}, - reply *ResourceProviderStopResponse) error { - err := s.Provider.Stop() - *reply = ResourceProviderStopResponse{ - Error: plugin.NewBasicError(err), - } - - return nil -} - -func (s *ResourceProviderServer) GetSchema( - args *ResourceProviderGetSchemaArgs, - result *ResourceProviderGetSchemaResponse, -) error { - schema, err := s.Provider.GetSchema(args.Req) - result.Schema = schema - if err != nil { - result.Error = plugin.NewBasicError(err) - } - return nil -} - -func (s *ResourceProviderServer) Input( - args *ResourceProviderInputArgs, - reply *ResourceProviderInputResponse) error { - conn, err := s.Broker.Dial(args.InputId) - if err != nil { - *reply = ResourceProviderInputResponse{ - Error: plugin.NewBasicError(err), - } - return nil - } - client := rpc.NewClient(conn) - defer client.Close() - - input := &UIInput{Client: client} - - config, err := s.Provider.Input(input, args.Config) - *reply = ResourceProviderInputResponse{ - Config: config, - Error: plugin.NewBasicError(err), - } - - return nil -} - -func (s *ResourceProviderServer) Validate( - args *ResourceProviderValidateArgs, - reply *ResourceProviderValidateResponse) error { - warns, errs := s.Provider.Validate(args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProviderValidateResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} 
- -func (s *ResourceProviderServer) ValidateResource( - args *ResourceProviderValidateResourceArgs, - reply *ResourceProviderValidateResourceResponse) error { - warns, errs := s.Provider.ValidateResource(args.Type, args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProviderValidateResourceResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProviderServer) Configure( - config *terraform.ResourceConfig, - reply *ResourceProviderConfigureResponse) error { - err := s.Provider.Configure(config) - *reply = ResourceProviderConfigureResponse{ - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Apply( - args *ResourceProviderApplyArgs, - result *ResourceProviderApplyResponse) error { - state, err := s.Provider.Apply(args.Info, args.State, args.Diff) - *result = ResourceProviderApplyResponse{ - State: state, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Diff( - args *ResourceProviderDiffArgs, - result *ResourceProviderDiffResponse) error { - diff, err := s.Provider.Diff(args.Info, args.State, args.Config) - *result = ResourceProviderDiffResponse{ - Diff: diff, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) Refresh( - args *ResourceProviderRefreshArgs, - result *ResourceProviderRefreshResponse) error { - newState, err := s.Provider.Refresh(args.Info, args.State) - *result = ResourceProviderRefreshResponse{ - State: newState, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) ImportState( - args *ResourceProviderImportStateArgs, - result *ResourceProviderImportStateResponse) error { - states, err := s.Provider.ImportState(args.Info, args.Id) - *result = ResourceProviderImportStateResponse{ - State: states, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s 
*ResourceProviderServer) Resources( - nothing interface{}, - result *[]terraform.ResourceType) error { - *result = s.Provider.Resources() - return nil -} - -func (s *ResourceProviderServer) ValidateDataSource( - args *ResourceProviderValidateResourceArgs, - reply *ResourceProviderValidateResourceResponse) error { - warns, errs := s.Provider.ValidateDataSource(args.Type, args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProviderValidateResourceResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProviderServer) ReadDataDiff( - args *ResourceProviderReadDataDiffArgs, - result *ResourceProviderReadDataDiffResponse) error { - diff, err := s.Provider.ReadDataDiff(args.Info, args.Config) - *result = ResourceProviderReadDataDiffResponse{ - Diff: diff, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) ReadDataApply( - args *ResourceProviderReadDataApplyArgs, - result *ResourceProviderReadDataApplyResponse) error { - newState, err := s.Provider.ReadDataApply(args.Info, args.Diff) - *result = ResourceProviderReadDataApplyResponse{ - State: newState, - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProviderServer) DataSources( - nothing interface{}, - result *[]terraform.DataSource) error { - *result = s.Provider.DataSources() - return nil -} diff --git a/plugin/resource_provider_test.go b/plugin/resource_provider_test.go deleted file mode 100644 index 6c3cc4546..000000000 --- a/plugin/resource_provider_test.go +++ /dev/null @@ -1,827 +0,0 @@ -package plugin - -import ( - "errors" - "reflect" - "testing" - - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceProvider_impl(t *testing.T) { - var _ plugin.Plugin = new(ResourceProviderPlugin) - var _ terraform.ResourceProvider = new(ResourceProvider) -} - -func 
TestResourceProvider_stop(t *testing.T) { - // Create a mock provider - p := new(terraform.MockResourceProvider) - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - // Stop - e := provider.Stop() - if !p.StopCalled { - t.Fatal("stop should be called") - } - if e != nil { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvider_stopErrors(t *testing.T) { - p := new(terraform.MockResourceProvider) - p.StopReturnError = errors.New("foo") - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - // Stop - e := provider.Stop() - if !p.StopCalled { - t.Fatal("stop should be called") - } - if e == nil { - t.Fatal("should have error") - } - if e.Error() != "foo" { - t.Fatalf("bad: %s", e) - } -} - -func TestResourceProvider_input(t *testing.T) { - // Create a mock provider - p := new(terraform.MockResourceProvider) - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - input := new(terraform.MockUIInput) - - expected := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"bar": "baz"}, - } - p.InputReturnConfig = expected - - // Input - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - actual, err := 
provider.Input(input, config) - if !p.InputCalled { - t.Fatal("input should be called") - } - if !reflect.DeepEqual(p.InputConfig, config) { - t.Fatalf("bad: %#v", p.InputConfig) - } - if err != nil { - t.Fatalf("bad: %#v", err) - } - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %#v", actual) - } -} - -func TestResourceProvider_configure(t *testing.T) { - // Create a mock provider - p := new(terraform.MockResourceProvider) - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - e := provider.Configure(config) - if !p.ConfigureCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ConfigureConfig, config) { - t.Fatalf("bad: %#v", p.ConfigureConfig) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvider_configure_errors(t *testing.T) { - p := new(terraform.MockResourceProvider) - p.ConfigureReturnError = errors.New("foo") - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - e := provider.Configure(config) - if !p.ConfigureCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ConfigureConfig, config) { - t.Fatalf("bad: %#v", p.ConfigureConfig) - } - if e == nil { - t.Fatal("should have error") - } - if e.Error() != "foo" { 
- t.Fatalf("bad: %s", e) - } -} - -func TestResourceProvider_configure_warnings(t *testing.T) { - p := new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - e := provider.Configure(config) - if !p.ConfigureCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ConfigureConfig, config) { - t.Fatalf("bad: %#v", p.ConfigureConfig) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvider_apply(t *testing.T) { - p := new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - p.ApplyReturn = &terraform.InstanceState{ - ID: "bob", - } - - // Apply - info := &terraform.InstanceInfo{} - state := &terraform.InstanceState{} - diff := &terraform.InstanceDiff{} - newState, err := provider.Apply(info, state, diff) - if !p.ApplyCalled { - t.Fatal("apply should be called") - } - if !reflect.DeepEqual(p.ApplyDiff, diff) { - t.Fatalf("bad: %#v", p.ApplyDiff) - } - if err != nil { - t.Fatalf("bad: %#v", err) - } - if !reflect.DeepEqual(p.ApplyReturn, newState) { - t.Fatalf("bad: %#v", newState) - } -} - -func TestResourceProvider_diff(t *testing.T) { - p := new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, 
legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - p.DiffReturn = &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "foo": &terraform.ResourceAttrDiff{ - Old: "", - New: "bar", - }, - }, - } - - // Diff - info := &terraform.InstanceInfo{} - state := &terraform.InstanceState{} - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - diff, err := provider.Diff(info, state, config) - if !p.DiffCalled { - t.Fatal("diff should be called") - } - if !reflect.DeepEqual(p.DiffDesired, config) { - t.Fatalf("bad: %#v", p.DiffDesired) - } - if err != nil { - t.Fatalf("bad: %#v", err) - } - if !reflect.DeepEqual(p.DiffReturn, diff) { - t.Fatalf("bad: %#v", diff) - } -} - -func TestResourceProvider_diff_error(t *testing.T) { - p := new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - p.DiffReturnError = errors.New("foo") - - // Diff - info := &terraform.InstanceInfo{} - state := &terraform.InstanceState{} - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - diff, err := provider.Diff(info, state, config) - if !p.DiffCalled { - t.Fatal("diff should be called") - } - if !reflect.DeepEqual(p.DiffDesired, config) { - t.Fatalf("bad: %#v", p.DiffDesired) - } - if err == nil { - t.Fatal("should have error") - } - if diff != nil { - t.Fatal("should not have diff") - } -} - -func TestResourceProvider_refresh(t *testing.T) { - p := 
new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - p.RefreshReturn = &terraform.InstanceState{ - ID: "bob", - } - - // Refresh - info := &terraform.InstanceInfo{} - state := &terraform.InstanceState{} - newState, err := provider.Refresh(info, state) - if !p.RefreshCalled { - t.Fatal("refresh should be called") - } - if !reflect.DeepEqual(p.RefreshState, state) { - t.Fatalf("bad: %#v", p.RefreshState) - } - if err != nil { - t.Fatalf("bad: %#v", err) - } - if !reflect.DeepEqual(p.RefreshReturn, newState) { - t.Fatalf("bad: %#v", newState) - } -} - -func TestResourceProvider_importState(t *testing.T) { - p := new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - p.ImportStateReturn = []*terraform.InstanceState{ - &terraform.InstanceState{ - ID: "bob", - }, - } - - // ImportState - info := &terraform.InstanceInfo{} - states, err := provider.ImportState(info, "foo") - if !p.ImportStateCalled { - t.Fatal("ImportState should be called") - } - if !reflect.DeepEqual(p.ImportStateInfo, info) { - t.Fatalf("bad: %#v", p.ImportStateInfo) - } - if err != nil { - t.Fatalf("bad: %#v", err) - } - if !reflect.DeepEqual(p.ImportStateReturn, states) { - t.Fatalf("bad: %#v", states) - } -} - -func TestResourceProvider_resources(t *testing.T) { - p := new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ 
:= plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - expected := []terraform.ResourceType{ - terraform.ResourceType{Name: "foo"}, - terraform.ResourceType{Name: "bar", Importable: true}, - } - - p.ResourcesReturn = expected - - // Resources - result := provider.Resources() - if !p.ResourcesCalled { - t.Fatal("resources should be called") - } - if !reflect.DeepEqual(result, expected) { - t.Fatalf("bad: %#v", result) - } -} - -func TestResourceProvider_readdataapply(t *testing.T) { - p := new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - p.ReadDataApplyReturn = &terraform.InstanceState{ - ID: "bob", - } - - // ReadDataApply - info := &terraform.InstanceInfo{} - diff := &terraform.InstanceDiff{} - newState, err := provider.ReadDataApply(info, diff) - if !p.ReadDataApplyCalled { - t.Fatal("ReadDataApply should be called") - } - if !reflect.DeepEqual(p.ReadDataApplyDiff, diff) { - t.Fatalf("bad: %#v", p.ReadDataApplyDiff) - } - if err != nil { - t.Fatalf("bad: %#v", err) - } - if !reflect.DeepEqual(p.ReadDataApplyReturn, newState) { - t.Fatalf("bad: %#v", newState) - } -} - -func TestResourceProvider_datasources(t *testing.T) { - p := new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := 
client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - expected := []terraform.DataSource{ - {Name: "foo"}, - {Name: "bar"}, - } - - p.DataSourcesReturn = expected - - // DataSources - result := provider.DataSources() - if !p.DataSourcesCalled { - t.Fatal("DataSources should be called") - } - if !reflect.DeepEqual(result, expected) { - t.Fatalf("bad: %#v", result) - } -} - -func TestResourceProvider_validate(t *testing.T) { - p := new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provider.Validate(config) - if !p.ValidateCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ValidateConfig, config) { - t.Fatalf("bad: %#v", p.ValidateConfig) - } - if w != nil { - t.Fatalf("bad: %#v", w) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvider_validate_errors(t *testing.T) { - p := new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - p.ValidateReturnErrors = []error{errors.New("foo")} - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provider.Validate(config) - if !p.ValidateCalled { - t.Fatal("configure 
should be called") - } - if !reflect.DeepEqual(p.ValidateConfig, config) { - t.Fatalf("bad: %#v", p.ValidateConfig) - } - if w != nil { - t.Fatalf("bad: %#v", w) - } - - if len(e) != 1 { - t.Fatalf("bad: %#v", e) - } - if e[0].Error() != "foo" { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvider_validate_warns(t *testing.T) { - p := new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - p.ValidateReturnWarns = []string{"foo"} - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provider.Validate(config) - if !p.ValidateCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ValidateConfig, config) { - t.Fatalf("bad: %#v", p.ValidateConfig) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } - - expected := []string{"foo"} - if !reflect.DeepEqual(w, expected) { - t.Fatalf("bad: %#v", w) - } -} - -func TestResourceProvider_validateResource(t *testing.T) { - p := new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provider.ValidateResource("foo", config) - if !p.ValidateResourceCalled { - t.Fatal("configure should be called") - } - if p.ValidateResourceType != "foo" { - t.Fatalf("bad: %#v", 
p.ValidateResourceType) - } - if !reflect.DeepEqual(p.ValidateResourceConfig, config) { - t.Fatalf("bad: %#v", p.ValidateResourceConfig) - } - if w != nil { - t.Fatalf("bad: %#v", w) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvider_validateResource_errors(t *testing.T) { - p := new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - p.ValidateResourceReturnErrors = []error{errors.New("foo")} - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provider.ValidateResource("foo", config) - if !p.ValidateResourceCalled { - t.Fatal("configure should be called") - } - if p.ValidateResourceType != "foo" { - t.Fatalf("bad: %#v", p.ValidateResourceType) - } - if !reflect.DeepEqual(p.ValidateResourceConfig, config) { - t.Fatalf("bad: %#v", p.ValidateResourceConfig) - } - if w != nil { - t.Fatalf("bad: %#v", w) - } - - if len(e) != 1 { - t.Fatalf("bad: %#v", e) - } - if e[0].Error() != "foo" { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvider_validateResource_warns(t *testing.T) { - p := new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - p.ValidateResourceReturnWarns = []string{"foo"} - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := 
provider.ValidateResource("foo", config) - if !p.ValidateResourceCalled { - t.Fatal("configure should be called") - } - if p.ValidateResourceType != "foo" { - t.Fatalf("bad: %#v", p.ValidateResourceType) - } - if !reflect.DeepEqual(p.ValidateResourceConfig, config) { - t.Fatalf("bad: %#v", p.ValidateResourceConfig) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } - - expected := []string{"foo"} - if !reflect.DeepEqual(w, expected) { - t.Fatalf("bad: %#v", w) - } -} - -func TestResourceProvider_validateDataSource(t *testing.T) { - p := new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provider.ValidateDataSource("foo", config) - if !p.ValidateDataSourceCalled { - t.Fatal("configure should be called") - } - if p.ValidateDataSourceType != "foo" { - t.Fatalf("bad: %#v", p.ValidateDataSourceType) - } - if !reflect.DeepEqual(p.ValidateDataSourceConfig, config) { - t.Fatalf("bad: %#v", p.ValidateDataSourceConfig) - } - if w != nil { - t.Fatalf("bad: %#v", w) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvider_close(t *testing.T) { - p := new(terraform.MockResourceProvider) - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProviderFunc: testProviderFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProviderPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvider) - - var iface interface{} = provider - pCloser, ok := 
iface.(terraform.ResourceProviderCloser) - if !ok { - t.Fatal("should be a ResourceProviderCloser") - } - - if err := pCloser.Close(); err != nil { - t.Fatalf("failed to close provider: %s", err) - } - - // The connection should be closed now, so if we to make a - // new call we should get an error. - err = provider.Configure(&terraform.ResourceConfig{}) - if err == nil { - t.Fatal("should have error") - } -} diff --git a/plugin/resource_provisioner.go b/plugin/resource_provisioner.go deleted file mode 100644 index 5bebc4c61..000000000 --- a/plugin/resource_provisioner.go +++ /dev/null @@ -1,181 +0,0 @@ -package plugin - -import ( - "net/rpc" - - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/terraform" -) - -// ResourceProvisionerPlugin is the plugin.Plugin implementation. -type ResourceProvisionerPlugin struct { - ResourceProvisioner func() terraform.ResourceProvisioner -} - -func (p *ResourceProvisionerPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { - return &ResourceProvisionerServer{ - Broker: b, - Provisioner: p.ResourceProvisioner(), - }, nil -} - -func (p *ResourceProvisionerPlugin) Client( - b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { - return &ResourceProvisioner{Broker: b, Client: c}, nil -} - -// ResourceProvisioner is an implementation of terraform.ResourceProvisioner -// that communicates over RPC. 
-type ResourceProvisioner struct { - Broker *plugin.MuxBroker - Client *rpc.Client -} - -func (p *ResourceProvisioner) GetConfigSchema() (*configschema.Block, error) { - panic("not implemented") -} - -func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) { - var resp ResourceProvisionerValidateResponse - args := ResourceProvisionerValidateArgs{ - Config: c, - } - - err := p.Client.Call("Plugin.Validate", &args, &resp) - if err != nil { - return nil, []error{err} - } - - var errs []error - if len(resp.Errors) > 0 { - errs = make([]error, len(resp.Errors)) - for i, err := range resp.Errors { - errs[i] = err - } - } - - return resp.Warnings, errs -} - -func (p *ResourceProvisioner) Apply( - output terraform.UIOutput, - s *terraform.InstanceState, - c *terraform.ResourceConfig) error { - id := p.Broker.NextId() - go p.Broker.AcceptAndServe(id, &UIOutputServer{ - UIOutput: output, - }) - - var resp ResourceProvisionerApplyResponse - args := &ResourceProvisionerApplyArgs{ - OutputId: id, - State: s, - Config: c, - } - - err := p.Client.Call("Plugin.Apply", args, &resp) - if err != nil { - return err - } - if resp.Error != nil { - err = resp.Error - } - - return err -} - -func (p *ResourceProvisioner) Stop() error { - var resp ResourceProvisionerStopResponse - err := p.Client.Call("Plugin.Stop", new(interface{}), &resp) - if err != nil { - return err - } - if resp.Error != nil { - err = resp.Error - } - - return err -} - -func (p *ResourceProvisioner) Close() error { - return p.Client.Close() -} - -type ResourceProvisionerValidateArgs struct { - Config *terraform.ResourceConfig -} - -type ResourceProvisionerValidateResponse struct { - Warnings []string - Errors []*plugin.BasicError -} - -type ResourceProvisionerApplyArgs struct { - OutputId uint32 - State *terraform.InstanceState - Config *terraform.ResourceConfig -} - -type ResourceProvisionerApplyResponse struct { - Error *plugin.BasicError -} - -type ResourceProvisionerStopResponse 
struct { - Error *plugin.BasicError -} - -// ResourceProvisionerServer is a net/rpc compatible structure for serving -// a ResourceProvisioner. This should not be used directly. -type ResourceProvisionerServer struct { - Broker *plugin.MuxBroker - Provisioner terraform.ResourceProvisioner -} - -func (s *ResourceProvisionerServer) Apply( - args *ResourceProvisionerApplyArgs, - result *ResourceProvisionerApplyResponse) error { - conn, err := s.Broker.Dial(args.OutputId) - if err != nil { - *result = ResourceProvisionerApplyResponse{ - Error: plugin.NewBasicError(err), - } - return nil - } - client := rpc.NewClient(conn) - defer client.Close() - - output := &UIOutput{Client: client} - - err = s.Provisioner.Apply(output, args.State, args.Config) - *result = ResourceProvisionerApplyResponse{ - Error: plugin.NewBasicError(err), - } - return nil -} - -func (s *ResourceProvisionerServer) Validate( - args *ResourceProvisionerValidateArgs, - reply *ResourceProvisionerValidateResponse) error { - warns, errs := s.Provisioner.Validate(args.Config) - berrs := make([]*plugin.BasicError, len(errs)) - for i, err := range errs { - berrs[i] = plugin.NewBasicError(err) - } - *reply = ResourceProvisionerValidateResponse{ - Warnings: warns, - Errors: berrs, - } - return nil -} - -func (s *ResourceProvisionerServer) Stop( - _ interface{}, - reply *ResourceProvisionerStopResponse) error { - err := s.Provisioner.Stop() - *reply = ResourceProvisionerStopResponse{ - Error: plugin.NewBasicError(err), - } - - return nil -} diff --git a/plugin/resource_provisioner_test.go b/plugin/resource_provisioner_test.go deleted file mode 100644 index 0ff3ea239..000000000 --- a/plugin/resource_provisioner_test.go +++ /dev/null @@ -1,248 +0,0 @@ -package plugin - -import ( - "errors" - "reflect" - "testing" - - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform/terraform" -) - -func TestResourceProvisioner_impl(t *testing.T) { - var _ plugin.Plugin = new(ResourceProvisionerPlugin) - var _ 
terraform.ResourceProvisioner = new(ResourceProvisioner) -} - -func TestResourceProvisioner_stop(t *testing.T) { - // Create a mock provider - p := new(terraform.MockResourceProvisioner) - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProvisionerFunc: testProvisionerFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProvisionerPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvisioner) - - // Stop - e := provider.Stop() - if !p.StopCalled { - t.Fatal("stop should be called") - } - if e != nil { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvisioner_stopErrors(t *testing.T) { - p := new(terraform.MockResourceProvisioner) - p.StopReturnError = errors.New("foo") - - // Create a mock provider - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProvisionerFunc: testProvisionerFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProvisionerPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provider := raw.(terraform.ResourceProvisioner) - - // Stop - e := provider.Stop() - if !p.StopCalled { - t.Fatal("stop should be called") - } - if e == nil { - t.Fatal("should have error") - } - if e.Error() != "foo" { - t.Fatalf("bad: %s", e) - } -} - -func TestResourceProvisioner_apply(t *testing.T) { - // Create a mock provider - p := new(terraform.MockResourceProvisioner) - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProvisionerFunc: testProvisionerFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProvisionerPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provisioner := raw.(terraform.ResourceProvisioner) - - // Apply - output := &terraform.MockUIOutput{} - state := &terraform.InstanceState{} - conf := &terraform.ResourceConfig{} - err = provisioner.Apply(output, state, 
conf) - if !p.ApplyCalled { - t.Fatal("apply should be called") - } - if !reflect.DeepEqual(p.ApplyConfig, conf) { - t.Fatalf("bad: %#v", p.ApplyConfig) - } - if err != nil { - t.Fatalf("bad: %#v", err) - } -} - -func TestResourceProvisioner_validate(t *testing.T) { - // Create a mock provider - p := new(terraform.MockResourceProvisioner) - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProvisionerFunc: testProvisionerFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProvisionerPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provisioner := raw.(terraform.ResourceProvisioner) - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provisioner.Validate(config) - if !p.ValidateCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ValidateConfig, config) { - t.Fatalf("bad: %#v", p.ValidateConfig) - } - if w != nil { - t.Fatalf("bad: %#v", w) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvisioner_validate_errors(t *testing.T) { - // Create a mock provider - p := new(terraform.MockResourceProvisioner) - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProvisionerFunc: testProvisionerFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProvisionerPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provisioner := raw.(terraform.ResourceProvisioner) - - p.ValidateReturnErrors = []error{errors.New("foo")} - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provisioner.Validate(config) - if !p.ValidateCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ValidateConfig, config) { - t.Fatalf("bad: %#v", p.ValidateConfig) - } - if w != nil { - t.Fatalf("bad: %#v", w) - } - - if len(e) != 1 { - t.Fatalf("bad: 
%#v", e) - } - if e[0].Error() != "foo" { - t.Fatalf("bad: %#v", e) - } -} - -func TestResourceProvisioner_validate_warns(t *testing.T) { - // Create a mock provider - p := new(terraform.MockResourceProvisioner) - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProvisionerFunc: testProvisionerFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProvisionerPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provisioner := raw.(terraform.ResourceProvisioner) - - p.ValidateReturnWarns = []string{"foo"} - - // Configure - config := &terraform.ResourceConfig{ - Raw: map[string]interface{}{"foo": "bar"}, - } - w, e := provisioner.Validate(config) - if !p.ValidateCalled { - t.Fatal("configure should be called") - } - if !reflect.DeepEqual(p.ValidateConfig, config) { - t.Fatalf("bad: %#v", p.ValidateConfig) - } - if e != nil { - t.Fatalf("bad: %#v", e) - } - - expected := []string{"foo"} - if !reflect.DeepEqual(w, expected) { - t.Fatalf("bad: %#v", w) - } -} - -func TestResourceProvisioner_close(t *testing.T) { - // Create a mock provider - p := new(terraform.MockResourceProvisioner) - client, _ := plugin.TestPluginRPCConn(t, legacyPluginMap(&ServeOpts{ - ProvisionerFunc: testProvisionerFixed(p), - }), nil) - defer client.Close() - - // Request the provider - raw, err := client.Dispense(ProvisionerPluginName) - if err != nil { - t.Fatalf("err: %s", err) - } - provisioner := raw.(terraform.ResourceProvisioner) - - pCloser, ok := raw.(terraform.ResourceProvisionerCloser) - if !ok { - t.Fatal("should be a ResourceProvisionerCloser") - } - - if err := pCloser.Close(); err != nil { - t.Fatalf("failed to close provisioner: %s", err) - } - - // The connection should be closed now, so if we to make a - // new call we should get an error. 
- o := &terraform.MockUIOutput{} - s := &terraform.InstanceState{} - c := &terraform.ResourceConfig{} - err = provisioner.Apply(o, s, c) - if err == nil { - t.Fatal("should have error") - } -} diff --git a/plugin/serve.go b/plugin/serve.go index 8d056c591..27d3c9e6d 100644 --- a/plugin/serve.go +++ b/plugin/serve.go @@ -2,9 +2,7 @@ package plugin import ( "github.com/hashicorp/go-plugin" - grpcplugin "github.com/hashicorp/terraform/helper/plugin" proto "github.com/hashicorp/terraform/internal/tfplugin5" - "github.com/hashicorp/terraform/terraform" ) const ( @@ -35,16 +33,11 @@ var Handshake = plugin.HandshakeConfig{ MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", } -type ProviderFunc func() terraform.ResourceProvider -type ProvisionerFunc func() terraform.ResourceProvisioner type GRPCProviderFunc func() proto.ProviderServer type GRPCProvisionerFunc func() proto.ProvisionerServer // ServeOpts are the configurations to serve a plugin. type ServeOpts struct { - ProviderFunc ProviderFunc - ProvisionerFunc ProvisionerFunc - // Wrapped versions of the above plugins will automatically shimmed and // added to the GRPC functions when possible. GRPCProviderFunc GRPCProviderFunc @@ -54,27 +47,6 @@ type ServeOpts struct { // Serve serves a plugin. This function never returns and should be the final // function called in the main function of the plugin. func Serve(opts *ServeOpts) { - // since the plugins may not yet be aware of the new protocol, we - // automatically wrap the plugins in the grpc shims. - if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil { - provider := grpcplugin.NewGRPCProviderServerShim(opts.ProviderFunc()) - // this is almost always going to be a *schema.Provider, but check that - // we got back a valid provider just in case. 
- if provider != nil { - opts.GRPCProviderFunc = func() proto.ProviderServer { - return provider - } - } - } - if opts.GRPCProvisionerFunc == nil && opts.ProvisionerFunc != nil { - provisioner := grpcplugin.NewGRPCProvisionerServerShim(opts.ProvisionerFunc()) - if provisioner != nil { - opts.GRPCProvisionerFunc = func() proto.ProvisionerServer { - return provisioner - } - } - } - plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: Handshake, VersionedPlugins: pluginSet(opts), @@ -82,26 +54,8 @@ func Serve(opts *ServeOpts) { }) } -// pluginMap returns the legacy map[string]plugin.Plugin to use for configuring -// a plugin server or client. -func legacyPluginMap(opts *ServeOpts) map[string]plugin.Plugin { - return map[string]plugin.Plugin{ - "provider": &ResourceProviderPlugin{ - ResourceProvider: opts.ProviderFunc, - }, - "provisioner": &ResourceProvisionerPlugin{ - ResourceProvisioner: opts.ProvisionerFunc, - }, - } -} - func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet { - // Set the legacy netrpc plugins at version 4. - // The oldest version is returned in when executed by a legacy go-plugin - // client. - plugins := map[int]plugin.PluginSet{ - 4: legacyPluginMap(opts), - } + plugins := map[int]plugin.PluginSet{} // add the new protocol versions if they're configured if opts.GRPCProviderFunc != nil || opts.GRPCProvisionerFunc != nil { diff --git a/providers/provider.go b/providers/provider.go index 2d9db44bb..24f06a6cb 100644 --- a/providers/provider.go +++ b/providers/provider.go @@ -14,8 +14,10 @@ type Interface interface { // GetSchema returns the complete schema for the provider. GetSchema() GetSchemaResponse - // PrepareProviderConfig allows the provider to validate the configuration - // values, and set or override any values with defaults. + // PrepareProviderConfig allows the provider to validate the configuration. + // The PrepareProviderConfigResponse.PreparedConfig field is unused. 
The + // final configuration is not stored in the state, and any modifications + // that need to be made must be made during the Configure method call. PrepareProviderConfig(PrepareProviderConfigRequest) PrepareProviderConfigResponse // ValidateResourceTypeConfig allows the provider to validate the resource @@ -99,7 +101,7 @@ type PrepareProviderConfigRequest struct { } type PrepareProviderConfigResponse struct { - // PreparedConfig is the configuration as prepared by the provider. + // PreparedConfig is unused. PreparedConfig cty.Value // Diagnostics contains any warnings or errors from the method call. Diagnostics tfdiags.Diagnostics diff --git a/registry/client.go b/registry/client.go index 6770d7188..03e6f7c96 100644 --- a/registry/client.go +++ b/registry/client.go @@ -65,10 +65,6 @@ type Client struct { // services is a required *disco.Disco, which may have services and // credentials pre-loaded. services *disco.Disco - - // retry is the number of retries the client will attempt for each request - // if it runs into a transient failure with the remote registry. - retry int } // NewClient returns a new initialized registry client. 
diff --git a/registry/response/pagination_test.go b/registry/response/pagination_test.go index be862abbd..09c78e6c2 100644 --- a/registry/response/pagination_test.go +++ b/registry/response/pagination_test.go @@ -5,10 +5,6 @@ import ( "testing" ) -func intPtr(i int) *int { - return &i -} - func prettyJSON(o interface{}) (string, error) { bytes, err := json.MarshalIndent(o, "", "\t") if err != nil { diff --git a/registry/test/mock_registry.go b/registry/test/mock_registry.go index c6a8ef70d..00ead006e 100644 --- a/registry/test/mock_registry.go +++ b/registry/test/mock_registry.go @@ -8,10 +8,8 @@ import ( "net/http/httptest" "os" "regexp" - "sort" "strings" - version "github.com/hashicorp/go-version" svchost "github.com/hashicorp/terraform-svchost" "github.com/hashicorp/terraform-svchost/auth" "github.com/hashicorp/terraform-svchost/disco" @@ -51,8 +49,6 @@ type testMod struct { // Only one version for now, as we only lookup latest from the registry. type testProvider struct { version string - os string - arch string url string } @@ -135,20 +131,6 @@ func init() { } } -func latestVersion(versions []string) string { - var col version.Collection - for _, v := range versions { - ver, err := version.NewVersion(v) - if err != nil { - panic(err) - } - col = append(col, ver) - } - - sort.Sort(col) - return col[len(col)-1].String() -} - func mockRegHandler() http.Handler { mux := http.NewServeMux() @@ -188,7 +170,6 @@ func mockRegHandler() http.Handler { w.Header().Set("X-Terraform-Get", location) w.WriteHeader(http.StatusNoContent) // no body - return } moduleVersions := func(w http.ResponseWriter, r *http.Request) { diff --git a/repl/format.go b/repl/format.go index 0aa19efd5..14e09d708 100644 --- a/repl/format.go +++ b/repl/format.go @@ -46,12 +46,13 @@ func FormatValue(v cty.Value, indent int) string { case ty.IsPrimitiveType(): switch ty { case cty.String: - // FIXME: If it's a multi-line string, better to render it using - // HEREDOC-style syntax. 
+ if formatted, isMultiline := formatMultilineString(v, indent); isMultiline { + return formatted + } return strconv.Quote(v.AsString()) case cty.Number: bf := v.AsBigFloat() - return bf.Text('g', -1) + return bf.Text('f', -1) case cty.Bool: if v.True() { return "true" @@ -75,6 +76,56 @@ func FormatValue(v cty.Value, indent int) string { return fmt.Sprintf("%#v", v) } +func formatMultilineString(v cty.Value, indent int) (string, bool) { + str := v.AsString() + lines := strings.Split(str, "\n") + if len(lines) < 2 { + return "", false + } + + // If the value is indented, we use the indented form of heredoc for readability. + operator := "<<" + if indent > 0 { + operator = "<<-" + } + + // Default delimiter is "End Of Text" by convention + delimiter := "EOT" + +OUTER: + for { + // Check if any of the lines are in conflict with the delimiter. The + // parser allows leading and trailing whitespace, so we must remove it + // before comparison. + for _, line := range lines { + // If the delimiter matches a line, extend it and start again + if strings.TrimSpace(line) == delimiter { + delimiter = delimiter + "_" + continue OUTER + } + } + + // None of the lines match the delimiter, so we're ready + break + } + + // Write the heredoc, with indentation as appropriate. 
+ var buf strings.Builder + + buf.WriteString(operator) + buf.WriteString(delimiter) + for _, line := range lines { + buf.WriteByte('\n') + buf.WriteString(strings.Repeat(" ", indent)) + buf.WriteString(line) + } + buf.WriteByte('\n') + buf.WriteString(strings.Repeat(" ", indent)) + buf.WriteString(delimiter) + + return buf.String(), true +} + func formatMappingValue(v cty.Value, indent int) string { var buf strings.Builder count := 0 diff --git a/repl/format_test.go b/repl/format_test.go index 108d32dc9..6d4e484bd 100644 --- a/repl/format_test.go +++ b/repl/format_test.go @@ -58,7 +58,28 @@ func TestFormatValue(t *testing.T) { }, { cty.StringVal("hello\nworld"), - `"hello\nworld"`, // Ideally we'd use heredoc syntax here for better readability, but we don't yet + `< /dev/null; then diff --git a/scripts/generate-plugins.go b/scripts/generate-plugins.go deleted file mode 100644 index 24030bcf6..000000000 --- a/scripts/generate-plugins.go +++ /dev/null @@ -1,285 +0,0 @@ -// Generate Plugins is a small program that updates the lists of plugins in -// command/internal_plugin_list.go so they will be compiled into the main -// terraform binary. 
-package main - -import ( - "fmt" - "go/ast" - "go/parser" - "go/token" - "io/ioutil" - "log" - "os" - "path/filepath" - "sort" - "strings" -) - -const target = "command/internal_plugin_list.go" - -func main() { - if isProjectRoot() == false { - log.Fatalf("This program must be invoked in the terraform project root") - } - - //// Collect all of the data we need about plugins we have in the project - //providers, err := discoverProviders() - //if err != nil { - // log.Fatalf("Failed to discover providers: %s", err) - //} - - provisioners, err := discoverProvisioners() - if err != nil { - log.Fatalf("Failed to discover provisioners: %s", err) - } - - // Do some simple code generation and templating - output := source - output = strings.Replace(output, "IMPORTS", makeImports(nil, provisioners), 1) - //output = strings.Replace(output, "PROVIDERS", makeProviderMap(providers), 1) - output = strings.Replace(output, "PROVISIONERS", makeProvisionerMap(provisioners), 1) - - // TODO sort the lists of plugins so we are not subjected to random OS ordering of the plugin lists - - // Write our generated code to the command/plugin.go file - file, err := os.Create(target) - defer file.Close() - if err != nil { - log.Fatalf("Failed to open %s for writing: %s", target, err) - } - - _, err = file.WriteString(output) - if err != nil { - log.Fatalf("Failed writing to %s: %s", target, err) - } - - log.Printf("Generated %s", target) -} - -type plugin struct { - Package string // Package name from ast remoteexec - PluginName string // Path via deriveName() remote-exec - TypeName string // Type of plugin provisioner - Path string // Relative import path builtin/provisioners/remote-exec - ImportName string // See deriveImport() remoteexecprovisioner -} - -// makeProviderMap creates a map of providers like this: -// -// var InternalProviders = map[string]plugin.ProviderFunc{ -// "aws": aws.Provider, -// "azurerm": azurerm.Provider, -// "cloudflare": cloudflare.Provider, -func 
makeProviderMap(items []plugin) string { - output := "" - for _, item := range items { - output += fmt.Sprintf("\t\"%s\": %s.%s,\n", item.PluginName, item.ImportName, item.TypeName) - } - return output -} - -func isProjectRoot() bool { - _, err := os.Stat("go.mod") - if os.IsNotExist(err) { - return false - } - - return true -} - -// makeProvisionerMap creates a map of provisioners like this: -// -// "chef": chefprovisioner.Provisioner, -// "salt-masterless": saltmasterlessprovisioner.Provisioner, -// "file": fileprovisioner.Provisioner, -// "local-exec": localexecprovisioner.Provisioner, -// "remote-exec": remoteexecprovisioner.Provisioner, -// -func makeProvisionerMap(items []plugin) string { - output := "" - for _, item := range items { - output += fmt.Sprintf("\t\"%s\": %s.%s,\n", item.PluginName, item.ImportName, item.TypeName) - } - return output -} - -func makeImports(providers, provisioners []plugin) string { - plugins := []string{} - - for _, provider := range providers { - plugins = append(plugins, fmt.Sprintf("\t%s \"github.com/hashicorp/terraform/%s\"\n", provider.ImportName, filepath.ToSlash(provider.Path))) - } - - for _, provisioner := range provisioners { - plugins = append(plugins, fmt.Sprintf("\t%s \"github.com/hashicorp/terraform/%s\"\n", provisioner.ImportName, filepath.ToSlash(provisioner.Path))) - } - - // Make things pretty - sort.Strings(plugins) - - return strings.Join(plugins, "") -} - -// listDirectories recursively lists directories under the specified path -func listDirectories(path string) ([]string, error) { - names := []string{} - items, err := ioutil.ReadDir(path) - if err != nil { - return names, err - } - - for _, item := range items { - // We only want directories - if item.IsDir() { - if item.Name() == "testdata" { - continue - } - currentDir := filepath.Join(path, item.Name()) - names = append(names, currentDir) - - // Do some recursion - subNames, err := listDirectories(currentDir) - if err == nil { - names = append(names, 
subNames...) - } - } - } - - return names, nil -} - -// deriveName determines the name of the plugin relative to the specified root -// path. -func deriveName(root, full string) string { - short, _ := filepath.Rel(root, full) - bits := strings.Split(short, string(os.PathSeparator)) - return strings.Join(bits, "-") -} - -// deriveImport will build a unique import identifier based on packageName and -// the result of deriveName(). This is important for disambigutating between -// providers and provisioners that have the same name. This will be something -// like: -// -// remote-exec -> remoteexecprovisioner -// -// which is long, but is deterministic and unique. -func deriveImport(typeName, derivedName string) string { - return strings.Replace(derivedName, "-", "", -1) + strings.ToLower(typeName) -} - -// discoverTypesInPath searches for types of typeID in path using go's ast and -// returns a list of plugins it finds. -func discoverTypesInPath(path, typeID, typeName string) ([]plugin, error) { - pluginTypes := []plugin{} - - dirs, err := listDirectories(path) - if err != nil { - return pluginTypes, err - } - - for _, dir := range dirs { - fset := token.NewFileSet() - goPackages, err := parser.ParseDir(fset, dir, nil, parser.AllErrors) - if err != nil { - return pluginTypes, fmt.Errorf("Failed parsing directory %s: %s", dir, err) - } - - for _, goPackage := range goPackages { - ast.PackageExports(goPackage) - ast.Inspect(goPackage, func(n ast.Node) bool { - switch x := n.(type) { - case *ast.FuncDecl: - // If we get a function then we will check the function name - // against typeName and the function return type (Results) - // against typeID. - // - // There may be more than one return type but in the target - // case there should only be one. Also the return type is a - // ast.SelectorExpr which means we have multiple nodes. - // We'll read all of them as ast.Ident (identifier), join - // them via . 
to get a string like terraform.ResourceProvider - // and see if it matches our expected typeID - // - // This is somewhat verbose but prevents us from identifying - // the wrong types if the function name is amiguous or if - // there are other subfolders added later. - if x.Name.Name == typeName && len(x.Type.Results.List) == 1 { - node := x.Type.Results.List[0].Type - typeIdentifiers := []string{} - ast.Inspect(node, func(m ast.Node) bool { - switch y := m.(type) { - case *ast.Ident: - typeIdentifiers = append(typeIdentifiers, y.Name) - } - // We need all of the identifiers to join so we - // can't break early here. - return true - }) - if strings.Join(typeIdentifiers, ".") == typeID { - derivedName := deriveName(path, dir) - pluginTypes = append(pluginTypes, plugin{ - Package: goPackage.Name, - PluginName: derivedName, - ImportName: deriveImport(x.Name.Name, derivedName), - TypeName: x.Name.Name, - Path: dir, - }) - } - } - case *ast.TypeSpec: - // In the simpler case we will simply check whether the type - // declaration has the name we were looking for. - if x.Name.Name == typeID { - derivedName := deriveName(path, dir) - pluginTypes = append(pluginTypes, plugin{ - Package: goPackage.Name, - PluginName: derivedName, - ImportName: deriveImport(x.Name.Name, derivedName), - TypeName: x.Name.Name, - Path: dir, - }) - // The AST stops parsing when we return false. Once we - // find the symbol we want we can stop parsing. 
- return false - } - } - return true - }) - } - } - - return pluginTypes, nil -} - -func discoverProviders() ([]plugin, error) { - path := "./builtin/providers" - typeID := "terraform.ResourceProvider" - typeName := "Provider" - return discoverTypesInPath(path, typeID, typeName) -} - -func discoverProvisioners() ([]plugin, error) { - path := "./builtin/provisioners" - typeID := "terraform.ResourceProvisioner" - typeName := "Provisioner" - return discoverTypesInPath(path, typeID, typeName) -} - -const source = `// -// This file is automatically generated by scripts/generate-plugins.go -- Do not edit! -// -package command - -import ( -IMPORTS - "github.com/hashicorp/terraform/plugin" -) - -var InternalProviders = map[string]plugin.ProviderFunc{} - -var InternalProvisioners = map[string]plugin.ProvisionerFunc{ -PROVISIONERS -} -` diff --git a/scripts/generate-plugins_test.go b/scripts/generate-plugins_test.go deleted file mode 100644 index cba015b85..000000000 --- a/scripts/generate-plugins_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package main - -import "testing" - -func TestMakeProvisionerMap(t *testing.T) { - p := makeProvisionerMap([]plugin{ - { - Package: "file", - PluginName: "file", - TypeName: "Provisioner", - Path: "builtin/provisioners/file", - ImportName: "fileprovisioner", - }, - { - Package: "localexec", - PluginName: "local-exec", - TypeName: "Provisioner", - Path: "builtin/provisioners/local-exec", - ImportName: "localexecprovisioner", - }, - { - Package: "remoteexec", - PluginName: "remote-exec", - TypeName: "Provisioner", - Path: "builtin/provisioners/remote-exec", - ImportName: "remoteexecprovisioner", - }, - }) - - expected := ` "file": fileprovisioner.Provisioner, - "local-exec": localexecprovisioner.Provisioner, - "remote-exec": remoteexecprovisioner.Provisioner, -` - - if p != expected { - t.Errorf("Provisioner output does not match expected format.\n -- Expected -- \n%s\n -- Found --\n%s\n", expected, p) - } -} - -func TestDeriveName(t *testing.T) 
{ - actual := deriveName("builtin/provisioners", "builtin/provisioners/magic/remote-exec") - expected := "magic-remote-exec" - if actual != expected { - t.Errorf("Expected %s; found %s", expected, actual) - } -} - -func TestDeriveImport(t *testing.T) { - actual := deriveImport("provider", "magic-aws") - expected := "magicawsprovider" - if actual != expected { - t.Errorf("Expected %s; found %s", expected, actual) - } -} - -func contains(plugins []plugin, name string) bool { - for _, plugin := range plugins { - if plugin.PluginName == name { - return true - } - } - return false -} - -//func TestDiscoverTypesProviders(t *testing.T) { -// plugins, err := discoverTypesInPath("../builtin/providers", "terraform.ResourceProvider", "Provider") -// if err != nil { -// t.Fatalf(err.Error()) -// } -// // We're just going to spot-check, not do this exhaustively -// if !contains(plugins, "aws") { -// t.Errorf("Expected to find aws provider") -// } -// if !contains(plugins, "docker") { -// t.Errorf("Expected to find docker provider") -// } -// if !contains(plugins, "dnsimple") { -// t.Errorf("Expected to find dnsimple provider") -// } -// if !contains(plugins, "triton") { -// t.Errorf("Expected to find triton provider") -// } -// if contains(plugins, "file") { -// t.Errorf("Found unexpected provider file") -// } -//} - -func TestDiscoverTypesProvisioners(t *testing.T) { - plugins, err := discoverTypesInPath("../builtin/provisioners", "terraform.ResourceProvisioner", "Provisioner") - if err != nil { - t.Fatalf(err.Error()) - } - if !contains(plugins, "remote-exec") { - t.Errorf("Expected to find remote-exec provisioner") - } - if contains(plugins, "aws") { - t.Errorf("Found unexpected provisioner aws") - } -} diff --git a/scripts/gofmtcheck.sh b/scripts/gofmtcheck.sh index 7e6fbddf7..9a341da94 100755 --- a/scripts/gofmtcheck.sh +++ b/scripts/gofmtcheck.sh @@ -6,7 +6,7 @@ gofmt_files=$(gofmt -l `find . 
-name '*.go' | grep -v vendor`) if [[ -n ${gofmt_files} ]]; then echo 'gofmt needs running on the following files:' echo "${gofmt_files}" - echo "You can use the command: \`make fmtcheck\` to reformat code." + echo "You can use the command: \`gofmt -w .\` to reformat code." exit 1 fi diff --git a/states/instance_generation.go b/states/instance_generation.go index 617ad4ea6..891adc003 100644 --- a/states/instance_generation.go +++ b/states/instance_generation.go @@ -18,7 +18,3 @@ type Generation interface { // CurrentGen is the Generation representing the currently-active object for // a resource instance. var CurrentGen Generation - -type currentGen struct{} - -func (g currentGen) generation() {} diff --git a/states/remote/remote.go b/states/remote/remote.go index d3d3f7b2b..0dab1863c 100644 --- a/states/remote/remote.go +++ b/states/remote/remote.go @@ -1,8 +1,6 @@ package remote import ( - "fmt" - "github.com/hashicorp/terraform/states/statemgr" ) @@ -38,18 +36,3 @@ type Payload struct { // Factory is the factory function to create a remote client. type Factory func(map[string]string) (Client, error) - -// NewClient returns a new Client with the given type and configuration. -// The client is looked up in the BuiltinClients variable. -func NewClient(t string, conf map[string]string) (Client, error) { - f, ok := BuiltinClients[t] - if !ok { - return nil, fmt.Errorf("unknown remote client type: %s", t) - } - - return f(conf) -} - -// BuiltinClients is the list of built-in clients that can be used with -// NewClient. 
-var BuiltinClients = map[string]Factory{} diff --git a/states/remote/remote_test.go b/states/remote/remote_test.go index 1e8edc8b8..55e23342a 100644 --- a/states/remote/remote_test.go +++ b/states/remote/remote_test.go @@ -1,50 +1,11 @@ package remote import ( - "bytes" "crypto/md5" "encoding/json" "testing" - - "github.com/hashicorp/terraform/states/statefile" - "github.com/hashicorp/terraform/states/statemgr" ) -// testClient is a generic function to test any client. -func testClient(t *testing.T, c Client) { - var buf bytes.Buffer - s := statemgr.TestFullInitialState() - sf := &statefile.File{State: s} - if err := statefile.Write(sf, &buf); err != nil { - t.Fatalf("err: %s", err) - } - data := buf.Bytes() - - if err := c.Put(data); err != nil { - t.Fatalf("put: %s", err) - } - - p, err := c.Get() - if err != nil { - t.Fatalf("get: %s", err) - } - if !bytes.Equal(p.Data, data) { - t.Fatalf("bad: %#v", p) - } - - if err := c.Delete(); err != nil { - t.Fatalf("delete: %s", err) - } - - p, err = c.Get() - if err != nil { - t.Fatalf("get: %s", err) - } - if p != nil { - t.Fatalf("bad: %#v", p) - } -} - func TestRemoteClient_noPayload(t *testing.T) { s := &State{ Client: nilClient{}, diff --git a/states/resource.go b/states/resource.go index 0b6a45092..28223671d 100644 --- a/states/resource.go +++ b/states/resource.go @@ -135,7 +135,7 @@ func (i *ResourceInstance) GetGeneration(gen Generation) *ResourceInstanceObject return i.Deposed[dk] } if gen == nil { - panic(fmt.Sprintf("get with nil Generation")) + panic("get with nil Generation") } // Should never fall out here, since the above covers all possible // Generation values. 
diff --git a/states/state_deepcopy.go b/states/state_deepcopy.go index 93e96d756..ad3610a72 100644 --- a/states/state_deepcopy.go +++ b/states/state_deepcopy.go @@ -101,18 +101,18 @@ func (rs *Resource) DeepCopy() *Resource { // is the caller's responsibility to ensure mutual exclusion for the duration // of the operation, but may then freely modify the receiver and the returned // copy independently once this method returns. -func (is *ResourceInstance) DeepCopy() *ResourceInstance { - if is == nil { +func (i *ResourceInstance) DeepCopy() *ResourceInstance { + if i == nil { return nil } - deposed := make(map[DeposedKey]*ResourceInstanceObjectSrc, len(is.Deposed)) - for k, obj := range is.Deposed { + deposed := make(map[DeposedKey]*ResourceInstanceObjectSrc, len(i.Deposed)) + for k, obj := range i.Deposed { deposed[k] = obj.DeepCopy() } return &ResourceInstance{ - Current: is.Current.DeepCopy(), + Current: i.Current.DeepCopy(), Deposed: deposed, } } @@ -125,54 +125,54 @@ func (is *ResourceInstance) DeepCopy() *ResourceInstance { // It is the caller's responsibility to ensure mutual exclusion for the duration // of the operation, but may then freely modify the receiver and the returned // copy independently once this method returns. 
-func (obj *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc { - if obj == nil { +func (os *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc { + if os == nil { return nil } var attrsFlat map[string]string - if obj.AttrsFlat != nil { - attrsFlat = make(map[string]string, len(obj.AttrsFlat)) - for k, v := range obj.AttrsFlat { + if os.AttrsFlat != nil { + attrsFlat = make(map[string]string, len(os.AttrsFlat)) + for k, v := range os.AttrsFlat { attrsFlat[k] = v } } var attrsJSON []byte - if obj.AttrsJSON != nil { - attrsJSON = make([]byte, len(obj.AttrsJSON)) - copy(attrsJSON, obj.AttrsJSON) + if os.AttrsJSON != nil { + attrsJSON = make([]byte, len(os.AttrsJSON)) + copy(attrsJSON, os.AttrsJSON) } var attrPaths []cty.PathValueMarks - if obj.AttrSensitivePaths != nil { - attrPaths = make([]cty.PathValueMarks, len(obj.AttrSensitivePaths)) - copy(attrPaths, obj.AttrSensitivePaths) + if os.AttrSensitivePaths != nil { + attrPaths = make([]cty.PathValueMarks, len(os.AttrSensitivePaths)) + copy(attrPaths, os.AttrSensitivePaths) } var private []byte - if obj.Private != nil { - private = make([]byte, len(obj.Private)) - copy(private, obj.Private) + if os.Private != nil { + private = make([]byte, len(os.Private)) + copy(private, os.Private) } // Some addrs.Referencable implementations are technically mutable, but // we treat them as immutable by convention and so we don't deep-copy here. 
var dependencies []addrs.ConfigResource - if obj.Dependencies != nil { - dependencies = make([]addrs.ConfigResource, len(obj.Dependencies)) - copy(dependencies, obj.Dependencies) + if os.Dependencies != nil { + dependencies = make([]addrs.ConfigResource, len(os.Dependencies)) + copy(dependencies, os.Dependencies) } return &ResourceInstanceObjectSrc{ - Status: obj.Status, - SchemaVersion: obj.SchemaVersion, + Status: os.Status, + SchemaVersion: os.SchemaVersion, Private: private, AttrsFlat: attrsFlat, AttrsJSON: attrsJSON, AttrSensitivePaths: attrPaths, Dependencies: dependencies, - CreateBeforeDestroy: obj.CreateBeforeDestroy, + CreateBeforeDestroy: os.CreateBeforeDestroy, } } @@ -184,28 +184,28 @@ func (obj *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc { // is the caller's responsibility to ensure mutual exclusion for the duration // of the operation, but may then freely modify the receiver and the returned // copy independently once this method returns. -func (obj *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject { - if obj == nil { +func (o *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject { + if o == nil { return nil } var private []byte - if obj.Private != nil { - private = make([]byte, len(obj.Private)) - copy(private, obj.Private) + if o.Private != nil { + private = make([]byte, len(o.Private)) + copy(private, o.Private) } // Some addrs.Referenceable implementations are technically mutable, but // we treat them as immutable by convention and so we don't deep-copy here. 
var dependencies []addrs.ConfigResource - if obj.Dependencies != nil { - dependencies = make([]addrs.ConfigResource, len(obj.Dependencies)) - copy(dependencies, obj.Dependencies) + if o.Dependencies != nil { + dependencies = make([]addrs.ConfigResource, len(o.Dependencies)) + copy(dependencies, o.Dependencies) } return &ResourceInstanceObject{ - Value: obj.Value, - Status: obj.Status, + Value: o.Value, + Status: o.Status, Private: private, Dependencies: dependencies, } diff --git a/states/state_string.go b/states/state_string.go index 680acf7a4..0f74d5965 100644 --- a/states/state_string.go +++ b/states/state_string.go @@ -76,18 +76,18 @@ func (s *State) String() string { // testString is used to produce part of the output of State.String. It should // never be used directly. -func (m *Module) testString() string { +func (ms *Module) testString() string { var buf bytes.Buffer - if len(m.Resources) == 0 { + if len(ms.Resources) == 0 { buf.WriteString("") } // We use AbsResourceInstance here, even though everything belongs to // the same module, just because we have a sorting behavior defined // for those but not for just ResourceInstance. 
- addrsOrder := make([]addrs.AbsResourceInstance, 0, len(m.Resources)) - for _, rs := range m.Resources { + addrsOrder := make([]addrs.AbsResourceInstance, 0, len(ms.Resources)) + for _, rs := range ms.Resources { for ik := range rs.Instances { addrsOrder = append(addrsOrder, rs.Addr.Instance(ik)) } @@ -99,8 +99,8 @@ func (m *Module) testString() string { for _, fakeAbsAddr := range addrsOrder { addr := fakeAbsAddr.Resource - rs := m.Resource(addr.ContainingResource()) - is := m.ResourceInstance(addr) + rs := ms.Resource(addr.ContainingResource()) + is := ms.ResourceInstance(addr) // Here we need to fake up a legacy-style address as the old state // types would've used, since that's what our tests against those @@ -197,24 +197,24 @@ func (m *Module) testString() string { } if obj := is.Current; obj != nil && len(obj.Dependencies) > 0 { - buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) + buf.WriteString("\n Dependencies:\n") for _, dep := range obj.Dependencies { buf.WriteString(fmt.Sprintf(" %s\n", dep.String())) } } } - if len(m.OutputValues) > 0 { + if len(ms.OutputValues) > 0 { buf.WriteString("\nOutputs:\n\n") - ks := make([]string, 0, len(m.OutputValues)) - for k := range m.OutputValues { + ks := make([]string, 0, len(ms.OutputValues)) + for k := range ms.OutputValues { ks = append(ks, k) } sort.Strings(ks) for _, k := range ks { - v := m.OutputValues[k] + v := ms.OutputValues[k] lv := hcl2shim.ConfigValueFromHCL2(v.Value) switch vTyped := lv.(type) { case string: diff --git a/states/statefile/read.go b/states/statefile/read.go index d691c0290..8abd3be14 100644 --- a/states/statefile/read.go +++ b/states/statefile/read.go @@ -62,15 +62,6 @@ func Read(r io.Reader) (*File, error) { panic("readState returned nil state with no errors") } - if state.TerraformVersion != nil && state.TerraformVersion.GreaterThan(tfversion.SemVer) { - return state, fmt.Errorf( - "state snapshot was created by Terraform v%s, which is newer than current v%s; upgrade to Terraform v%s 
or greater to work with this state", - state.TerraformVersion, - tfversion.SemVer, - state.TerraformVersion, - ) - } - return state, diags.Err() } diff --git a/states/statefile/testdata/roundtrip/v4-future.in.tfstate b/states/statefile/testdata/roundtrip/v4-future.in.tfstate new file mode 100644 index 000000000..71d759bc7 --- /dev/null +++ b/states/statefile/testdata/roundtrip/v4-future.in.tfstate @@ -0,0 +1,60 @@ +{ + "version": 4, + "serial": 0, + "lineage": "f2968801-fa14-41ab-a044-224f3a4adf04", + "terraform_version": "999.0.0", + "outputs": { + "numbers": { + "type": "string", + "value": "0,1" + } + }, + "resources": [ + { + "mode": "managed", + "type": "null_resource", + "name": "bar", + "provider": "provider[\"registry.terraform.io/-/null\"]", + "instances": [ + { + "schema_version": 0, + "attributes_flat": { + "id": "5388490630832483079", + "triggers.%": "1", + "triggers.whaaat": "0,1" + }, + "depends_on": [ + "null_resource.foo" + ] + } + ] + }, + { + "mode": "managed", + "type": "null_resource", + "name": "foo", + "provider": "provider[\"registry.terraform.io/-/null\"]", + "each": "list", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes_flat": { + "id": "8212585058302700791", + "triggers.%": "1", + "triggers.what": "0" + } + }, + { + "index_key": 1, + "schema_version": 0, + "attributes_flat": { + "id": "1523897709610803586", + "triggers.%": "1", + "triggers.what": "0" + } + } + ] + } + ] +} diff --git a/states/statefile/testdata/roundtrip/v4-future.out.tfstate b/states/statefile/testdata/roundtrip/v4-future.out.tfstate new file mode 120000 index 000000000..b4037372e --- /dev/null +++ b/states/statefile/testdata/roundtrip/v4-future.out.tfstate @@ -0,0 +1 @@ +v4-future.in.tfstate \ No newline at end of file diff --git a/states/statefile/version1.go b/states/statefile/version1.go index 80d711bc8..2a5edc01b 100644 --- a/states/statefile/version1.go +++ b/states/statefile/version1.go @@ -165,10 +165,3 @@ type instanceStateV1 struct { 
// external client code. Meta map[string]string `json:"meta,omitempty"` } - -type ephemeralStateV1 struct { - // ConnInfo is used for the providers to export information which is - // used to connect to the resource for provisioning. For example, - // this could contain SSH or WinRM credentials. - ConnInfo map[string]string `json:"-"` -} diff --git a/states/statefile/version2.go b/states/statefile/version2.go index be93924a7..9f74815ea 100644 --- a/states/statefile/version2.go +++ b/states/statefile/version2.go @@ -3,7 +3,6 @@ package statefile import ( "encoding/json" "fmt" - "sync" "github.com/hashicorp/terraform/tfdiags" ) @@ -95,8 +94,6 @@ type outputStateV2 struct { // Value contains the value of the output, in the structure described // by the Type field. Value interface{} `json:"value"` - - mu sync.Mutex } type moduleStateV2 struct { @@ -178,8 +175,6 @@ type resourceStateV2 struct { // e.g. "aws_instance" goes with the "aws" provider. // If the resource block contained a "provider" key, that value will be set here. Provider string `json:"provider"` - - mu sync.Mutex } type instanceStateV2 struct { diff --git a/states/statefile/version3_upgrade.go b/states/statefile/version3_upgrade.go index e54a08ccd..41b0f6b45 100644 --- a/states/statefile/version3_upgrade.go +++ b/states/statefile/version3_upgrade.go @@ -3,7 +3,6 @@ package statefile import ( "encoding/json" "fmt" - "log" "strconv" "strings" @@ -336,35 +335,6 @@ func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2, } } - dependencies := make([]string, 0, len(rsOld.Dependencies)) - for _, v := range rsOld.Dependencies { - depStr, err := parseLegacyDependency(v) - if err != nil { - // We just drop invalid dependencies on the floor here, because - // they tend to get left behind in Terraform 0.11 when resources - // are renamed or moved between modules and there's no automatic - // way to fix them here. 
In practice it shouldn't hurt to miss - // a few dependency edges in the state because a subsequent plan - // will run a refresh walk first and re-synchronize the - // dependencies with the configuration. - // - // There is one rough edges where this can cause an incorrect - // result, though: If the first command the user runs after - // upgrading to Terraform 0.12 uses -refresh=false and thus - // prevents the dependency reorganization from occurring _and_ - // that initial plan discovered "orphaned" resources (not present - // in configuration any longer) then when the plan is applied the - // destroy ordering will be incorrect for the instances of those - // resources. We expect that is a rare enough situation that it - // isn't a big deal, and even when it _does_ occur it's common for - // the apply to succeed anyway unless many separate resources with - // complex inter-dependencies are all orphaned at once. - log.Printf("statefile: ignoring invalid dependency address %q while upgrading from state version 3 to version 4: %s", v, err) - continue - } - dependencies = append(dependencies, depStr) - } - return &instanceObjectStateV4{ IndexKey: instKeyRaw, Status: status, @@ -473,28 +443,3 @@ func simplifyImpliedValueType(ty cty.Type) cty.Type { return ty } } - -func parseLegacyDependency(s string) (string, error) { - parts := strings.Split(s, ".") - ret := parts[0] - for _, part := range parts[1:] { - if part == "*" { - break - } - if i, err := strconv.Atoi(part); err == nil { - ret = ret + fmt.Sprintf("[%d]", i) - break - } - ret = ret + "." + part - } - - // The result must parse as a reference, or else we'll create an invalid - // state file. 
- var diags tfdiags.Diagnostics - _, diags = addrs.ParseRefStr(ret) - if diags.HasErrors() { - return "", diags.Err() - } - - return ret, nil -} diff --git a/states/statemgr/filesystem.go b/states/statemgr/filesystem.go index 138e57dae..737829161 100644 --- a/states/statemgr/filesystem.go +++ b/states/statemgr/filesystem.go @@ -184,7 +184,7 @@ func (s *Filesystem) writeState(state *states.State, meta *SnapshotMeta) error { } s.file.State = state.DeepCopy() - if _, err := s.stateFileOut.Seek(0, os.SEEK_SET); err != nil { + if _, err := s.stateFileOut.Seek(0, io.SeekStart); err != nil { return err } if err := s.stateFileOut.Truncate(0); err != nil { @@ -269,7 +269,7 @@ func (s *Filesystem) refreshState() error { } // we have a state file, make sure we're at the start - s.stateFileOut.Seek(0, os.SEEK_SET) + s.stateFileOut.Seek(0, io.SeekStart) reader = s.stateFileOut } diff --git a/states/statemgr/filesystem_lock_unix.go b/states/statemgr/filesystem_lock_unix.go index 4c4f571ed..1a9709452 100644 --- a/states/statemgr/filesystem_lock_unix.go +++ b/states/statemgr/filesystem_lock_unix.go @@ -3,8 +3,8 @@ package statemgr import ( + "io" "log" - "os" "syscall" ) @@ -14,7 +14,7 @@ func (s *Filesystem) lock() error { log.Printf("[TRACE] statemgr.Filesystem: locking %s using fcntl flock", s.path) flock := &syscall.Flock_t{ Type: syscall.F_RDLCK | syscall.F_WRLCK, - Whence: int16(os.SEEK_SET), + Whence: int16(io.SeekStart), Start: 0, Len: 0, } @@ -27,7 +27,7 @@ func (s *Filesystem) unlock() error { log.Printf("[TRACE] statemgr.Filesystem: unlocking %s using fcntl flock", s.path) flock := &syscall.Flock_t{ Type: syscall.F_UNLCK, - Whence: int16(os.SEEK_SET), + Whence: int16(io.SeekStart), Start: 0, Len: 0, } diff --git a/states/statemgr/statemgr_test.go b/states/statemgr/statemgr_test.go index 41c73d1dc..e9e822671 100644 --- a/states/statemgr/statemgr_test.go +++ b/states/statemgr/statemgr_test.go @@ -67,12 +67,11 @@ func TestLockWithContext(t *testing.T) { // unlock the state 
during LockWithContext unlocked := make(chan struct{}) + var unlockErr error go func() { defer close(unlocked) <-attempted - if err := s.Unlock(id); err != nil { - t.Fatal(err) - } + unlockErr = s.Unlock(id) }() ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second) @@ -85,6 +84,9 @@ func TestLockWithContext(t *testing.T) { // ensure the goruotine completes <-unlocked + if unlockErr != nil { + t.Fatal(unlockErr) + } } func TestMain(m *testing.M) { diff --git a/synchronized_writers.go b/synchronized_writers.go deleted file mode 100644 index 2533d1316..000000000 --- a/synchronized_writers.go +++ /dev/null @@ -1,31 +0,0 @@ -package main - -import ( - "io" - "sync" -) - -type synchronizedWriter struct { - io.Writer - mutex *sync.Mutex -} - -// synchronizedWriters takes a set of writers and returns wrappers that ensure -// that only one write can be outstanding at a time across the whole set. -func synchronizedWriters(targets ...io.Writer) []io.Writer { - mutex := &sync.Mutex{} - ret := make([]io.Writer, len(targets)) - for i, target := range targets { - ret[i] = &synchronizedWriter{ - Writer: target, - mutex: mutex, - } - } - return ret -} - -func (w *synchronizedWriter) Write(p []byte) (int, error) { - w.mutex.Lock() - defer w.mutex.Unlock() - return w.Writer.Write(p) -} diff --git a/terraform/context.go b/terraform/context.go index e74a72ae4..a7fa0fb3e 100644 --- a/terraform/context.go +++ b/terraform/context.go @@ -1,12 +1,13 @@ package terraform import ( - "bytes" "context" "fmt" "log" + "strings" "sync" + "github.com/apparentlymart/go-versions/versions" "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/instances" @@ -15,10 +16,11 @@ import ( "github.com/hashicorp/terraform/providers" "github.com/hashicorp/terraform/provisioners" "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/states/statefile" "github.com/hashicorp/terraform/tfdiags" 
"github.com/zclconf/go-cty/cty" + "github.com/hashicorp/terraform/internal/depsfile" + "github.com/hashicorp/terraform/internal/getproviders" _ "github.com/hashicorp/terraform/internal/logging" ) @@ -35,17 +37,6 @@ const ( InputModeStd = InputModeProvider ) -var ( - // contextFailOnShadowError will cause Context operations to return - // errors when shadow operations fail. This is only used for testing. - contextFailOnShadowError = false - - // contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every - // Plan operation, effectively testing the Diff DeepCopy whenever - // a Plan occurs. This is enabled for tests. - contextTestDeepCopyOnPlan = false -) - // ContextOpts are the user-configurable options to create a context with // NewContext. type ContextOpts struct { @@ -67,6 +58,14 @@ type ContextOpts struct { // plugins that will be requested from the provider resolver. ProviderSHA256s map[string][]byte + // If non-nil, will be verified to ensure that provider requirements from + // configuration can be satisfied by the set of locked dependencies. + LockedDependencies *depsfile.Locks + + // Set of providers to exclude from the requirements check process, as they + // are marked as in local development. + ProvidersInDevelopment map[addrs.Provider]struct{} + UIInput UIInput } @@ -115,11 +114,9 @@ type Context struct { parallelSem Semaphore providerInputConfig map[string]map[string]cty.Value providerSHA256s map[string][]byte - runLock sync.Mutex runCond *sync.Cond runContext context.Context runContextCancel context.CancelFunc - shadowErr error } // (additional methods on Context can be found in context_*.go files.) @@ -212,6 +209,50 @@ func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) { config = configs.NewEmptyConfig() } + // If we have a configuration and a set of locked dependencies, verify that + // the provider requirements from the configuration can be satisfied by the + // locked dependencies. 
+ if opts.LockedDependencies != nil { + reqs, providerDiags := config.ProviderRequirements() + diags = diags.Append(providerDiags) + + locked := opts.LockedDependencies.AllProviders() + unmetReqs := make(getproviders.Requirements) + for provider, versionConstraints := range reqs { + // Builtin providers are not listed in the locks file + if provider.IsBuiltIn() { + continue + } + // Development providers must be excluded from this check + if _, ok := opts.ProvidersInDevelopment[provider]; ok { + continue + } + // If the required provider doesn't exist in the lock, or the + // locked version doesn't meet the constraints, mark the + // requirement unmet + acceptable := versions.MeetingConstraints(versionConstraints) + if lock, ok := locked[provider]; !ok || !acceptable.Has(lock.Version()) { + unmetReqs[provider] = versionConstraints + } + } + + if len(unmetReqs) > 0 { + var buf strings.Builder + for provider, versionConstraints := range unmetReqs { + fmt.Fprintf(&buf, "\n- %s", provider) + if len(versionConstraints) > 0 { + fmt.Fprintf(&buf, " (%s)", getproviders.VersionConstraintsString(versionConstraints)) + } + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider requirements cannot be satisfied by locked dependencies", + fmt.Sprintf("The following required providers are not installed:\n%s\n\nPlease run \"terraform init\".", buf.String()), + )) + return nil, diags + } + } + log.Printf("[TRACE] terraform.NewContext: complete") // By the time we get here, we should have values defined for all of @@ -329,33 +370,6 @@ func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, tfdiags. } } -// ShadowError returns any errors caught during a shadow operation. -// -// A shadow operation is an operation run in parallel to a real operation -// that performs the same tasks using new logic on copied state. The results -// are compared to ensure that the new logic works the same as the old logic. 
-// The shadow never affects the real operation or return values. -// -// The result of the shadow operation are only available through this function -// call after a real operation is complete. -// -// For API consumers of Context, you can safely ignore this function -// completely if you have no interest in helping report experimental feature -// errors to Terraform maintainers. Otherwise, please call this function -// after every operation and report this to the user. -// -// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect -// the real state or result of a real operation. They are purely informational -// to assist in future Terraform versions being more stable. Please message -// this effectively to the end user. -// -// This must be called only when no other operation is running (refresh, -// plan, etc.). The result can be used in parallel to any other operation -// running. -func (c *Context) ShadowError() error { - return c.shadowErr -} - // State returns a copy of the current state associated with this context. // // This cannot safely be called in parallel with any other Context function. 
@@ -516,6 +530,20 @@ The -target option is not for routine use, and is provided only for exceptional )) } + var plan *plans.Plan + var planDiags tfdiags.Diagnostics + switch { + case c.destroy: + plan, planDiags = c.destroyPlan() + default: + plan, planDiags = c.plan() + } + diags = diags.Append(planDiags) + if diags.HasErrors() { + return nil, diags + } + + // convert the variables into the format expected for the plan varVals := make(map[string]plans.DynamicValue, len(c.variables)) for k, iv := range c.variables { // We use cty.DynamicPseudoType here so that we'll save both the @@ -533,44 +561,85 @@ The -target option is not for routine use, and is provided only for exceptional varVals[k] = dv } - p := &plans.Plan{ - VariableValues: varVals, - TargetAddrs: c.targets, - ProviderSHA256s: c.providerSHA256s, - } + // insert the run-specific data from the context into the plan; variables, + // targets and provider SHAs. + plan.VariableValues = varVals + plan.TargetAddrs = c.targets + plan.ProviderSHA256s = c.providerSHA256s - operation := walkPlan - graphType := GraphTypePlan - if c.destroy { - operation = walkPlanDestroy - graphType = GraphTypePlanDestroy - } + return plan, diags +} - graph, graphDiags := c.Graph(graphType, nil) +func (c *Context) plan() (*plans.Plan, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + graph, graphDiags := c.Graph(GraphTypePlan, nil) diags = diags.Append(graphDiags) if graphDiags.HasErrors() { return nil, diags } // Do the walk - walker, walkDiags := c.walk(graph, operation) + walker, walkDiags := c.walk(graph, walkPlan) diags = diags.Append(walker.NonFatalDiagnostics) diags = diags.Append(walkDiags) if walkDiags.HasErrors() { return nil, diags } - p.Changes = c.changes + plan := &plans.Plan{ + Changes: c.changes, + } c.refreshState.SyncWrapper().RemovePlannedResourceInstanceObjects() refreshedState := c.refreshState.DeepCopy() - p.State = refreshedState + plan.State = refreshedState // replace the working state with the 
updated state, so that immediate calls // to Apply work as expected. c.state = refreshedState - return p, diags + return plan, diags +} + +func (c *Context) destroyPlan() (*plans.Plan, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + destroyPlan := &plans.Plan{} + c.changes = plans.NewChanges() + + // A destroy plan starts by running Refresh to read any pending data + // sources, and remove missing managed resources. This is required because + // a "destroy plan" is only creating delete changes, and is essentially a + // local operation. + if !c.skipRefresh { + refreshPlan, refreshDiags := c.plan() + diags = diags.Append(refreshDiags) + if diags.HasErrors() { + return nil, diags + } + + // insert the refreshed state into the destroy plan result, and discard + // the changes recorded from the refresh. + destroyPlan.State = refreshPlan.State + c.changes = plans.NewChanges() + } + + graph, graphDiags := c.Graph(GraphTypePlanDestroy, nil) + diags = diags.Append(graphDiags) + if graphDiags.HasErrors() { + return nil, diags + } + + // Do the walk + walker, walkDiags := c.walk(graph, walkPlan) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return nil, diags + } + + destroyPlan.Changes = c.changes + return destroyPlan, diags } // Refresh goes through all the resources in the state and refreshes them @@ -694,9 +763,6 @@ func (c *Context) acquireRun(phase string) func() { // Reset the stop hook so we're not stopped c.sh.Reset() - // Reset the shadow errors - c.shadowErr = nil - return c.releaseRun } @@ -841,37 +907,3 @@ func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan s return stop, wait } - -// ShimLegacyState is a helper that takes the legacy state type and -// converts it to the new state type. 
-// -// This is implemented as a state file upgrade, so it will not preserve -// parts of the state structure that are not included in a serialized state, -// such as the resolved results of any local values, outputs in non-root -// modules, etc. -func ShimLegacyState(legacy *State) (*states.State, error) { - if legacy == nil { - return nil, nil - } - var buf bytes.Buffer - err := WriteState(legacy, &buf) - if err != nil { - return nil, err - } - f, err := statefile.Read(&buf) - if err != nil { - return nil, err - } - return f.State, err -} - -// MustShimLegacyState is a wrapper around ShimLegacyState that panics if -// the conversion does not succeed. This is primarily intended for tests where -// the given legacy state is an object constructed within the test. -func MustShimLegacyState(legacy *State) *states.State { - ret, err := ShimLegacyState(legacy) - if err != nil { - panic(err) - } - return ret -} diff --git a/terraform/context_apply2_test.go b/terraform/context_apply2_test.go new file mode 100644 index 000000000..cc3ee2f47 --- /dev/null +++ b/terraform/context_apply2_test.go @@ -0,0 +1 @@ +package terraform diff --git a/terraform/context_apply_test.go b/terraform/context_apply_test.go index 1c77293fd..1f80ae7d3 100644 --- a/terraform/context_apply_test.go +++ b/terraform/context_apply_test.go @@ -34,8 +34,8 @@ import ( func TestContext2Apply_basic(t *testing.T) { m := testModule(t, "apply-good") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -75,7 +75,6 @@ func TestContext2Apply_unstable(t *testing.T) { m := testModule(t, "apply-unstable") p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -94,7 +93,7 @@ func TestContext2Apply_unstable(t *testing.T) { Type: 
"test_resource", Name: "foo", }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance) - schema := p.GetSchemaReturn.ResourceTypes["test_resource"] // automatically available in mock + schema := p.GetSchemaResponse.ResourceTypes["test_resource"].Block rds := plan.Changes.ResourceInstance(addr) rd, err := rds.Decode(schema.ImpliedType()) if err != nil { @@ -132,8 +131,8 @@ func TestContext2Apply_unstable(t *testing.T) { func TestContext2Apply_escape(t *testing.T) { m := testModule(t, "apply-escape") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -162,8 +161,8 @@ aws_instance.bar: func TestContext2Apply_resourceCountOneList(t *testing.T) { m := testModule(t, "apply-resource-count-one-list") p := testProvider("null") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -193,7 +192,6 @@ test = [foo]`) func TestContext2Apply_resourceCountZeroList(t *testing.T) { m := testModule(t, "apply-resource-count-zero-list") p := testProvider("null") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -354,7 +352,6 @@ func TestContext2Apply_resourceDependsOnModuleDestroy(t *testing.T) { var globalState *states.State { - p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -533,8 +530,8 @@ func TestContext2Apply_resourceDependsOnModuleInModule(t *testing.T) { func TestContext2Apply_mapVarBetweenModules(t *testing.T) { m := testModule(t, "apply-map-var-through-module") p := testProvider("null") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + 
p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -573,8 +570,8 @@ module.test: func TestContext2Apply_refCount(t *testing.T) { m := testModule(t, "apply-ref-count") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -606,8 +603,8 @@ func TestContext2Apply_refCount(t *testing.T) { func TestContext2Apply_providerAlias(t *testing.T) { m := testModule(t, "apply-provider-alias") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -690,8 +687,8 @@ func TestContext2Apply_providerAliasConfigure(t *testing.T) { func TestContext2Apply_providerWarning(t *testing.T) { m := testModule(t, "apply-provider-warning") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn p.ValidateResourceTypeConfigFn = func(req providers.ValidateResourceTypeConfigRequest) (resp providers.ValidateResourceTypeConfigResponse) { resp.Diagnostics = resp.Diagnostics.Append(tfdiags.SimpleWarning("just a warning")) return @@ -732,7 +729,6 @@ func TestContext2Apply_emptyModule(t *testing.T) { // A module with only outputs (no resources) m := testModule(t, "apply-empty-module") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -761,8 +757,8 @@ func TestContext2Apply_emptyModule(t *testing.T) { func TestContext2Apply_createBeforeDestroy(t *testing.T) { m := testModule(t, "apply-good-create-before") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn 
p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) root.SetResourceInstanceCurrent( @@ -808,8 +804,8 @@ func TestContext2Apply_createBeforeDestroy(t *testing.T) { func TestContext2Apply_createBeforeDestroyUpdate(t *testing.T) { m := testModule(t, "apply-good-create-before-update") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -858,8 +854,8 @@ func TestContext2Apply_createBeforeDestroyUpdate(t *testing.T) { func TestContext2Apply_createBeforeDestroy_dependsNonCBD(t *testing.T) { m := testModule(t, "apply-cbd-depends-non-cbd") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -921,8 +917,8 @@ func TestContext2Apply_createBeforeDestroy_hook(t *testing.T) { h := new(MockHook) m := testModule(t, "apply-good-create-before") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) root.SetResourceInstanceCurrent( @@ -982,8 +978,8 @@ func TestContext2Apply_createBeforeDestroy_hook(t *testing.T) { func TestContext2Apply_createBeforeDestroy_deposedCount(t *testing.T) { m := testModule(t, "apply-cbd-count") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -1060,8 +1056,8 @@ aws_instance.bar.1: func TestContext2Apply_createBeforeDestroy_deposedOnly(t *testing.T) { m := testModule(t, 
"apply-cbd-deposed-only") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -1113,7 +1109,6 @@ aws_instance.bar: func TestContext2Apply_destroyComputed(t *testing.T) { m := testModule(t, "apply-destroy-computed") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -1159,7 +1154,6 @@ func TestContext2Apply_destroyDependsOn(t *testing.T) { func testContext2Apply_destroyDependsOn(t *testing.T) { m := testModule(t, "apply-destroy-depends-on") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() @@ -1275,9 +1269,9 @@ func TestContext2Apply_destroyDependsOnStateOnly(t *testing.T) { } func testContext2Apply_destroyDependsOnStateOnly(t *testing.T, state *states.State) { + state = state.DeepCopy() m := testModule(t, "empty") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn // Record the order we see Apply var actual []string @@ -1371,9 +1365,9 @@ func TestContext2Apply_destroyDependsOnStateOnlyModule(t *testing.T) { } func testContext2Apply_destroyDependsOnStateOnlyModule(t *testing.T, state *states.State) { + state = state.DeepCopy() m := testModule(t, "empty") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn // Record the order we see Apply @@ -1414,9 +1408,8 @@ func testContext2Apply_destroyDependsOnStateOnlyModule(t *testing.T, state *stat func TestContext2Apply_dataBasic(t *testing.T) { m := testModule(t, "apply-data-basic") p := testProvider("null") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn - p.ReadDataSourceResponse = providers.ReadDataSourceResponse{ + 
p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ State: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yo"), "foo": cty.NullVal(cty.String), @@ -1449,8 +1442,13 @@ func TestContext2Apply_dataBasic(t *testing.T) { func TestContext2Apply_destroyData(t *testing.T) { m := testModule(t, "apply-destroy-data-resource") p := testProvider("null") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: req.Config, + } + } + state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) root.SetResourceInstanceCurrent( @@ -1493,6 +1491,8 @@ func TestContext2Apply_destroyData(t *testing.T) { } wantHookCalls := []*testHookCall{ + {"PreDiff", "data.null_data_source.testing"}, + {"PostDiff", "data.null_data_source.testing"}, {"PreDiff", "data.null_data_source.testing"}, {"PostDiff", "data.null_data_source.testing"}, {"PostStateUpdate", ""}, @@ -1509,7 +1509,6 @@ func TestContext2Apply_destroySkipsCBD(t *testing.T) { // just doing a `terraform destroy`. 
m := testModule(t, "apply-destroy-cbd") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -1553,7 +1552,6 @@ func TestContext2Apply_destroySkipsCBD(t *testing.T) { func TestContext2Apply_destroyModuleVarProviderConfig(t *testing.T) { m := testModule(t, "apply-destroy-mod-var-provider-config") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -1590,7 +1588,7 @@ func TestContext2Apply_destroyCrossProviders(t *testing.T) { p_aws := testProvider("aws") p_aws.ApplyResourceChangeFn = testApplyFn p_aws.PlanResourceChangeFn = testDiffFn - p_aws.GetSchemaReturn = &ProviderSchema{ + p_aws.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -1613,7 +1611,7 @@ func TestContext2Apply_destroyCrossProviders(t *testing.T) { }, }, }, - } + }) providers := map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p_aws), @@ -1666,8 +1664,8 @@ func getContextForApply_destroyCrossProviders(t *testing.T, m *configs.Config, p func TestContext2Apply_minimal(t *testing.T) { m := testModule(t, "apply-minimal") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -1734,8 +1732,16 @@ func TestContext2Apply_cancel(t *testing.T) { }() state := <-stateCh - if applyDiags.HasErrors() { - t.Fatalf("unexpected errors: %s", applyDiags.Err()) + // only expecting an early exit error + if !applyDiags.HasErrors() { + t.Fatal("expected early exit error") + } + + for _, d := range 
applyDiags { + desc := d.Description() + if desc.Summary != "execution halted" { + t.Fatalf("unexpected error: %v", applyDiags.Err()) + } } actual := strings.TrimSpace(state.String()) @@ -1812,8 +1818,16 @@ func TestContext2Apply_cancelBlock(t *testing.T) { // Wait for apply to complete state := <-stateCh - if applyDiags.HasErrors() { - t.Fatalf("unexpected error: %s", applyDiags.Err()) + // only expecting an early exit error + if !applyDiags.HasErrors() { + t.Fatal("expected early exit error") + } + + for _, d := range applyDiags { + desc := d.Description() + if desc.Summary != "execution halted" { + t.Fatalf("unexpected error: %v", applyDiags.Err()) + } } checkStateString(t, state, ` @@ -1828,8 +1842,8 @@ aws_instance.foo: func TestContext2Apply_cancelProvisioner(t *testing.T) { m := testModule(t, "apply-cancel-provisioner") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn pr := testProvisioner() pr.GetSchemaResponse = provisioners.GetSchemaResponse{ @@ -1848,7 +1862,7 @@ func TestContext2Apply_cancelProvisioner(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -1882,7 +1896,18 @@ func TestContext2Apply_cancelProvisioner(t *testing.T) { // Wait for completion state := <-stateCh - assertNoErrors(t, applyDiags) + + // we are expecting only an early exit error + if !applyDiags.HasErrors() { + t.Fatal("expected early exit error") + } + + for _, d := range applyDiags { + desc := d.Description() + if desc.Summary != "execution halted" { + t.Fatalf("unexpected error: %v", applyDiags.Err()) + } + } checkStateString(t, state, ` aws_instance.foo: (tainted) @@ -1900,9 +1925,9 @@ aws_instance.foo: (tainted) func TestContext2Apply_compute(t *testing.T) { m := 
testModule(t, "apply-compute") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.ApplyResourceChangeFn = testApplyFn + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -1937,7 +1962,7 @@ func TestContext2Apply_compute(t *testing.T) { }, }, }, - } + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -2027,7 +2052,6 @@ func TestContext2Apply_countDecrease(t *testing.T) { func TestContext2Apply_countDecreaseToOneX(t *testing.T) { m := testModule(t, "apply-count-dec-one") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -2088,7 +2112,6 @@ func TestContext2Apply_countDecreaseToOneX(t *testing.T) { func TestContext2Apply_countDecreaseToOneCorrupted(t *testing.T) { m := testModule(t, "apply-count-dec-one") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -2205,8 +2228,8 @@ aws_instance.foo.1: func TestContext2Apply_countVariable(t *testing.T) { m := testModule(t, "apply-count-variable") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -2233,8 +2256,8 @@ func TestContext2Apply_countVariable(t *testing.T) { func TestContext2Apply_countVariableRef(t *testing.T) { m := testModule(t, "apply-count-variable-ref") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ 
Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -2266,7 +2289,6 @@ func TestContext2Apply_provisionerInterpCount(t *testing.T) { m, snap := testModuleWithSnapshot(t, "apply-provisioner-interp-count") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn pr := testProvisioner() @@ -2275,7 +2297,7 @@ func TestContext2Apply_provisionerInterpCount(t *testing.T) { addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), } - provisioners := map[string]ProvisionerFactory{ + provisioners := map[string]provisioners.Factory{ "local-exec": testProvisionerFuncFixed(pr), } ctx := testContext2(t, &ContextOpts{ @@ -2318,8 +2340,8 @@ func TestContext2Apply_provisionerInterpCount(t *testing.T) { func TestContext2Apply_foreachVariable(t *testing.T) { m := testModule(t, "plan-for-each-unknown-value") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -2351,8 +2373,8 @@ func TestContext2Apply_foreachVariable(t *testing.T) { func TestContext2Apply_moduleBasic(t *testing.T) { m := testModule(t, "apply-module") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -2401,7 +2423,7 @@ func TestContext2Apply_moduleDestroyOrder(t *testing.T) { return resp } - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -2411,7 +2433,7 @@ func TestContext2Apply_moduleDestroyOrder(t *testing.T) { }, }, }, - } + }) state := states.NewState() child := 
state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) @@ -2469,8 +2491,8 @@ func TestContext2Apply_moduleDestroyOrder(t *testing.T) { func TestContext2Apply_moduleInheritAlias(t *testing.T) { m := testModule(t, "apply-module-provider-inherit-alias") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn p.ConfigureFn = func(req providers.ConfigureRequest) (resp providers.ConfigureResponse) { val := req.Config.GetAttr("value") @@ -2520,9 +2542,9 @@ func TestContext2Apply_orphanResource(t *testing.T) { // 2. Apply an empty configuration against the same state, which should // then clean up both the instances and the containing resource objects. p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.ApplyResourceChangeFn = testApplyFn + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test_thing": { Attributes: map[string]*configschema.Attribute{ @@ -2531,7 +2553,7 @@ func TestContext2Apply_orphanResource(t *testing.T) { }, }, }, - } + }) // Step 1: create the resources and instances m := testModule(t, "apply-orphan-resource") @@ -2595,7 +2617,6 @@ func TestContext2Apply_orphanResource(t *testing.T) { func TestContext2Apply_moduleOrphanInheritAlias(t *testing.T) { m := testModule(t, "apply-module-provider-inherit-alias-orphan") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn p.ConfigureFn = func(req providers.ConfigureRequest) (resp providers.ConfigureResponse) { @@ -2651,7 +2672,6 @@ func TestContext2Apply_moduleOrphanInheritAlias(t *testing.T) { func TestContext2Apply_moduleOrphanProvider(t *testing.T) { m := testModule(t, "apply-module-orphan-provider-inherit") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = 
testDiffFn p.ConfigureFn = func(req providers.ConfigureRequest) (resp providers.ConfigureResponse) { @@ -2695,7 +2715,6 @@ func TestContext2Apply_moduleOrphanProvider(t *testing.T) { func TestContext2Apply_moduleOrphanGrandchildProvider(t *testing.T) { m := testModule(t, "apply-module-orphan-provider-inherit") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn p.ConfigureFn = func(req providers.ConfigureRequest) (resp providers.ConfigureResponse) { @@ -2739,7 +2758,6 @@ func TestContext2Apply_moduleOrphanGrandchildProvider(t *testing.T) { func TestContext2Apply_moduleGrandchildProvider(t *testing.T) { m := testModule(t, "apply-module-grandchild-provider-inherit") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn var callLock sync.Mutex @@ -2786,8 +2804,8 @@ func TestContext2Apply_moduleGrandchildProvider(t *testing.T) { func TestContext2Apply_moduleOnlyProvider(t *testing.T) { m := testModule(t, "apply-module-only-provider") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn pTest := testProvider("test") pTest.ApplyResourceChangeFn = testApplyFn pTest.PlanResourceChangeFn = testDiffFn @@ -2819,8 +2837,8 @@ func TestContext2Apply_moduleOnlyProvider(t *testing.T) { func TestContext2Apply_moduleProviderAlias(t *testing.T) { m := testModule(t, "apply-module-provider-alias") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -2847,7 +2865,6 @@ func TestContext2Apply_moduleProviderAlias(t *testing.T) { func TestContext2Apply_moduleProviderAliasTargets(t *testing.T) { m := testModule(t, "apply-module-provider-alias") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn 
p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -2887,7 +2904,6 @@ func TestContext2Apply_moduleProviderAliasTargets(t *testing.T) { func TestContext2Apply_moduleProviderCloseNested(t *testing.T) { m := testModule(t, "apply-module-provider-close-nested") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -2926,8 +2942,8 @@ func TestContext2Apply_moduleProviderCloseNested(t *testing.T) { func TestContext2Apply_moduleVarRefExisting(t *testing.T) { m := testModule(t, "apply-ref-existing") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) root.SetResourceInstanceCurrent( @@ -2966,7 +2982,6 @@ func TestContext2Apply_moduleVarRefExisting(t *testing.T) { func TestContext2Apply_moduleVarResourceCount(t *testing.T) { m := testModule(t, "apply-module-var-resource-count") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -3016,8 +3031,8 @@ func TestContext2Apply_moduleVarResourceCount(t *testing.T) { func TestContext2Apply_moduleBool(t *testing.T) { m := testModule(t, "apply-module-bool") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -3046,8 +3061,8 @@ func TestContext2Apply_moduleBool(t *testing.T) { func TestContext2Apply_moduleTarget(t *testing.T) { m := testModule(t, "plan-targeted-cross-module") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := 
testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -3094,8 +3109,8 @@ module.B: func TestContext2Apply_multiProvider(t *testing.T) { m := testModule(t, "apply-multi-provider") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn pDO := testProvider("do") pDO.ApplyResourceChangeFn = testApplyFn @@ -3133,9 +3148,8 @@ func TestContext2Apply_multiProvider(t *testing.T) { func TestContext2Apply_multiProviderDestroy(t *testing.T) { m := testModule(t, "apply-multi-provider-destroy") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "addr": {Type: cty.String, Optional: true}, @@ -3149,12 +3163,12 @@ func TestContext2Apply_multiProviderDestroy(t *testing.T) { }, }, }, - } + }) p2 := testProvider("vault") p2.ApplyResourceChangeFn = testApplyFn p2.PlanResourceChangeFn = testDiffFn - p2.GetSchemaReturn = &ProviderSchema{ + p2.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "vault_instance": { Attributes: map[string]*configschema.Attribute{ @@ -3162,7 +3176,7 @@ func TestContext2Apply_multiProviderDestroy(t *testing.T) { }, }, }, - } + }) var state *states.State @@ -3254,9 +3268,8 @@ func TestContext2Apply_multiProviderDestroy(t *testing.T) { func TestContext2Apply_multiProviderDestroyChild(t *testing.T) { m := testModule(t, "apply-multi-provider-destroy-child") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{ Attributes: 
map[string]*configschema.Attribute{ "value": {Type: cty.String, Optional: true}, @@ -3270,12 +3283,12 @@ func TestContext2Apply_multiProviderDestroyChild(t *testing.T) { }, }, }, - } + }) p2 := testProvider("vault") p2.ApplyResourceChangeFn = testApplyFn p2.PlanResourceChangeFn = testDiffFn - p2.GetSchemaReturn = &ProviderSchema{ + p2.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{}, ResourceTypes: map[string]*configschema.Block{ "vault_instance": { @@ -3284,7 +3297,7 @@ func TestContext2Apply_multiProviderDestroyChild(t *testing.T) { }, }, }, - } + }) var state *states.State @@ -3375,7 +3388,6 @@ func TestContext2Apply_multiProviderDestroyChild(t *testing.T) { func TestContext2Apply_multiVar(t *testing.T) { m := testModule(t, "apply-multi-var") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn // First, apply with a count of 3 @@ -3457,11 +3469,10 @@ func TestContext2Apply_multiVarComprehensive(t *testing.T) { m := testModule(t, "apply-multi-var-comprehensive") p := testProvider("test") - configs := map[string]*ResourceConfig{} + configs := map[string]cty.Value{} var configsLock sync.Mutex p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { proposed := req.ProposedNewState configsLock.Lock() @@ -3471,7 +3482,7 @@ func TestContext2Apply_multiVarComprehensive(t *testing.T) { // and so the assertions below expect an old-style ResourceConfig, which // we'll construct via our shim for now to avoid rewriting all of the // assertions. 
- configs[key] = NewResourceConfigShimmed(req.Config, p.GetSchemaReturn.ResourceTypes["test_thing"]) + configs[key] = req.ProposedNewState retVals := make(map[string]cty.Value) for it := proposed.ElementIterator(); it.Next(); { @@ -3493,7 +3504,7 @@ func TestContext2Apply_multiVarComprehensive(t *testing.T) { } } - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test_thing": { Attributes: map[string]*configschema.Attribute{ @@ -3515,7 +3526,7 @@ func TestContext2Apply_multiVarComprehensive(t *testing.T) { }, }, }, - } + }) // First, apply with a count of 3 ctx := testContext2(t, &ContextOpts{ @@ -3536,102 +3547,99 @@ func TestContext2Apply_multiVarComprehensive(t *testing.T) { t.Fatalf("errors during plan") } - checkConfig := func(key string, want map[string]interface{}) { + checkConfig := func(key string, want cty.Value) { configsLock.Lock() defer configsLock.Unlock() - if _, ok := configs[key]; !ok { + got, ok := configs[key] + if !ok { t.Errorf("no config recorded for %s; expected a configuration", key) return } - got := configs[key].Config + t.Run("config for "+key, func(t *testing.T) { - want["key"] = key // to avoid doing this for every example for _, problem := range deep.Equal(got, want) { t.Errorf(problem) } }) } - checkConfig("multi_count_var.0", map[string]interface{}{ - "source_id": hcl2shim.UnknownVariableValue, - "source_name": "source.0", - }) - checkConfig("multi_count_var.2", map[string]interface{}{ - "source_id": hcl2shim.UnknownVariableValue, - "source_name": "source.2", - }) - checkConfig("multi_count_derived.0", map[string]interface{}{ - "source_id": hcl2shim.UnknownVariableValue, - "source_name": "source.0", - }) - checkConfig("multi_count_derived.2", map[string]interface{}{ - "source_id": hcl2shim.UnknownVariableValue, - "source_name": "source.2", - }) - checkConfig("whole_splat", map[string]interface{}{ - "source_ids": 
[]interface{}{ - hcl2shim.UnknownVariableValue, - hcl2shim.UnknownVariableValue, - hcl2shim.UnknownVariableValue, - }, - "source_names": []interface{}{ - "source.0", - "source.1", - "source.2", - }, - "source_ids_from_func": hcl2shim.UnknownVariableValue, - "source_names_from_func": []interface{}{ - "source.0", - "source.1", - "source.2", - }, - - "source_ids_wrapped": []interface{}{ - []interface{}{ - hcl2shim.UnknownVariableValue, - hcl2shim.UnknownVariableValue, - hcl2shim.UnknownVariableValue, - }, - }, - "source_names_wrapped": []interface{}{ - []interface{}{ - "source.0", - "source.1", - "source.2", - }, - }, - - "first_source_id": hcl2shim.UnknownVariableValue, - "first_source_name": "source.0", - }) - checkConfig("child.whole_splat", map[string]interface{}{ - "source_ids": []interface{}{ - hcl2shim.UnknownVariableValue, - hcl2shim.UnknownVariableValue, - hcl2shim.UnknownVariableValue, - }, - "source_names": []interface{}{ - "source.0", - "source.1", - "source.2", - }, - - "source_ids_wrapped": []interface{}{ - []interface{}{ - hcl2shim.UnknownVariableValue, - hcl2shim.UnknownVariableValue, - hcl2shim.UnknownVariableValue, - }, - }, - "source_names_wrapped": []interface{}{ - []interface{}{ - "source.0", - "source.1", - "source.2", - }, - }, - }) + checkConfig("multi_count_var.0", cty.ObjectVal(map[string]cty.Value{ + "source_id": cty.UnknownVal(cty.String), + "source_name": cty.StringVal("source.0"), + })) + checkConfig("multi_count_var.2", cty.ObjectVal(map[string]cty.Value{ + "source_id": cty.UnknownVal(cty.String), + "source_name": cty.StringVal("source.2"), + })) + checkConfig("multi_count_derived.0", cty.ObjectVal(map[string]cty.Value{ + "source_id": cty.UnknownVal(cty.String), + "source_name": cty.StringVal("source.0"), + })) + checkConfig("multi_count_derived.2", cty.ObjectVal(map[string]cty.Value{ + "source_id": cty.UnknownVal(cty.String), + "source_name": cty.StringVal("source.2"), + })) + checkConfig("whole_splat", 
cty.ObjectVal(map[string]cty.Value{ + "source_ids": cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + }), + "source_names": cty.ListVal([]cty.Value{ + cty.StringVal("source.0"), + cty.StringVal("source.1"), + cty.StringVal("source.2"), + }), + "source_ids_from_func": cty.UnknownVal(cty.String), + "source_names_from_func": cty.ListVal([]cty.Value{ + cty.StringVal("source.0"), + cty.StringVal("source.1"), + cty.StringVal("source.2"), + }), + "source_ids_wrapped": cty.ListVal([]cty.Value{ + cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + }), + }), + "source_names_wrapped": cty.ListVal([]cty.Value{ + cty.ListVal([]cty.Value{ + cty.StringVal("source.0"), + cty.StringVal("source.1"), + cty.StringVal("source.2"), + }), + }), + "first_source_id": cty.UnknownVal(cty.String), + "first_source_name": cty.StringVal("source.0"), + })) + checkConfig("child.whole_splat", cty.ObjectVal(map[string]cty.Value{ + "source_ids": cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + }), + "source_names": cty.ListVal([]cty.Value{ + cty.StringVal("source.0"), + cty.StringVal("source.1"), + cty.StringVal("source.2"), + }), + "source_ids_wrapped": cty.ListVal([]cty.Value{ + cty.ListVal([]cty.Value{ + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + cty.UnknownVal(cty.String), + }), + }), + "source_names_wrapped": cty.ListVal([]cty.Value{ + cty.ListVal([]cty.Value{ + cty.StringVal("source.0"), + cty.StringVal("source.1"), + cty.StringVal("source.2"), + }), + }), + })) t.Run("apply", func(t *testing.T) { state, diags := ctx.Apply() @@ -3665,7 +3673,6 @@ func TestContext2Apply_multiVarComprehensive(t *testing.T) { func TestContext2Apply_multiVarOrder(t *testing.T) { m := testModule(t, "apply-multi-var-order") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn 
p.PlanResourceChangeFn = testDiffFn // First, apply with a count of 3 @@ -3699,7 +3706,6 @@ func TestContext2Apply_multiVarOrder(t *testing.T) { func TestContext2Apply_multiVarOrderInterp(t *testing.T) { m := testModule(t, "apply-multi-var-order-interp") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn // First, apply with a count of 3 @@ -3737,8 +3743,8 @@ func TestContext2Apply_multiVarCountDec(t *testing.T) { { m := testModule(t, "apply-multi-var-count-dec") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -3844,9 +3850,8 @@ func TestContext2Apply_multiVarCountDec(t *testing.T) { func TestContext2Apply_multiVarMissingState(t *testing.T) { m := testModule(t, "apply-multi-var-missing-state") p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test_thing": { Attributes: map[string]*configschema.Attribute{ @@ -3855,7 +3860,7 @@ func TestContext2Apply_multiVarMissingState(t *testing.T) { }, }, }, - } + }) // First, apply with a count of 3 ctx := testContext2(t, &ContextOpts{ @@ -3880,7 +3885,6 @@ func TestContext2Apply_multiVarMissingState(t *testing.T) { func TestContext2Apply_outputOrphan(t *testing.T) { m := testModule(t, "apply-output-orphan") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() @@ -3915,7 +3919,6 @@ func TestContext2Apply_outputOrphan(t *testing.T) { func TestContext2Apply_outputOrphanModule(t *testing.T) { m := testModule(t, "apply-output-orphan-module") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn 
p.PlanResourceChangeFn = testDiffFn state := states.NewState() @@ -3970,7 +3973,6 @@ func TestContext2Apply_outputOrphanModule(t *testing.T) { func TestContext2Apply_providerComputedVar(t *testing.T) { m := testModule(t, "apply-provider-computed") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn pTest := testProvider("test") @@ -4006,7 +4008,6 @@ func TestContext2Apply_providerComputedVar(t *testing.T) { func TestContext2Apply_providerConfigureDisabled(t *testing.T) { m := testModule(t, "apply-provider-configure-disabled") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn p.ConfigureFn = func(req providers.ConfigureRequest) (resp providers.ConfigureResponse) { @@ -4042,8 +4043,8 @@ func TestContext2Apply_provisionerModule(t *testing.T) { m := testModule(t, "apply-provisioner-module") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn pr := testProvisioner() pr.GetSchemaResponse = provisioners.GetSchemaResponse{ @@ -4059,7 +4060,7 @@ func TestContext2Apply_provisionerModule(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -4089,8 +4090,8 @@ func TestContext2Apply_Provisioner_compute(t *testing.T) { m := testModule(t, "apply-provisioner-compute") p := testProvider("aws") pr := testProvisioner() - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { val := req.Config.GetAttr("command").AsString() @@ -4108,7 +4109,7 @@ func TestContext2Apply_Provisioner_compute(t *testing.T) { 
Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, Variables: InputValues{ @@ -4166,7 +4167,7 @@ func TestContext2Apply_provisionerCreateFail(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -4203,7 +4204,7 @@ func TestContext2Apply_provisionerCreateFailNoId(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -4227,9 +4228,9 @@ func TestContext2Apply_provisionerCreateFailNoId(t *testing.T) { func TestContext2Apply_provisionerFail(t *testing.T) { m := testModule(t, "apply-provisioner-fail") p := testProvider("aws") - pr := testProvisioner() p.PlanResourceChangeFn = testDiffFn p.ApplyResourceChangeFn = testApplyFn + pr := testProvisioner() pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("EXPLOSION")) return @@ -4240,7 +4241,7 @@ func TestContext2Apply_provisionerFail(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -4265,8 +4266,8 @@ func TestContext2Apply_provisionerFail_createBeforeDestroy(t *testing.T) { m := testModule(t, "apply-provisioner-fail-create-before") p := testProvider("aws") pr := 
testProvisioner() - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("EXPLOSION")) return @@ -4288,7 +4289,7 @@ func TestContext2Apply_provisionerFail_createBeforeDestroy(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, State: state, @@ -4413,7 +4414,7 @@ func TestContext2Apply_multiDepose_createBeforeDestroy(t *testing.T) { m := testModule(t, "apply-multi-depose-create-before-destroy") p := testProvider("aws") ps := map[addrs.Provider]providers.Factory{addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p)} - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -4422,7 +4423,7 @@ func TestContext2Apply_multiDepose_createBeforeDestroy(t *testing.T) { }, }, }, - } + }) state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -4626,8 +4627,8 @@ func TestContext2Apply_provisionerFailContinue(t *testing.T) { m := testModule(t, "apply-provisioner-fail-continue") p := testProvider("aws") pr := testProvisioner() - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("provisioner error")) @@ -4639,7 +4640,7 @@ func TestContext2Apply_provisionerFailContinue(t *testing.T) { Providers: 
map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -4674,7 +4675,6 @@ func TestContext2Apply_provisionerFailContinueHook(t *testing.T) { m := testModule(t, "apply-provisioner-fail-continue") p := testProvider("aws") pr := testProvisioner() - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("provisioner error")) @@ -4687,7 +4687,7 @@ func TestContext2Apply_provisionerFailContinueHook(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -4712,7 +4712,6 @@ func TestContext2Apply_provisionerDestroy(t *testing.T) { m := testModule(t, "apply-provisioner-destroy") p := testProvider("aws") pr := testProvisioner() - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { val := req.Config.GetAttr("command").AsString() @@ -4741,7 +4740,7 @@ func TestContext2Apply_provisionerDestroy(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -4768,7 +4767,6 @@ func TestContext2Apply_provisionerDestroyFail(t *testing.T) { m := testModule(t, "apply-provisioner-destroy") p := testProvider("aws") pr := testProvisioner() - 
p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("provisioner error")) @@ -4793,7 +4791,7 @@ func TestContext2Apply_provisionerDestroyFail(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -4826,7 +4824,6 @@ func TestContext2Apply_provisionerDestroyFailContinue(t *testing.T) { m := testModule(t, "apply-provisioner-destroy-continue") p := testProvider("aws") pr := testProvisioner() - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn var l sync.Mutex @@ -4862,7 +4859,7 @@ func TestContext2Apply_provisionerDestroyFailContinue(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -4896,7 +4893,6 @@ func TestContext2Apply_provisionerDestroyFailContinueFail(t *testing.T) { m := testModule(t, "apply-provisioner-destroy-fail") p := testProvider("aws") pr := testProvisioner() - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn var l sync.Mutex @@ -4932,7 +4928,7 @@ func TestContext2Apply_provisionerDestroyFailContinueFail(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -4968,8 +4964,8 @@ func TestContext2Apply_provisionerDestroyTainted(t *testing.T) { m := testModule(t, 
"apply-provisioner-destroy") p := testProvider("aws") pr := testProvisioner() - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn destroyCalled := false pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { @@ -4999,7 +4995,7 @@ func TestContext2Apply_provisionerDestroyTainted(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, Variables: InputValues{ @@ -5042,8 +5038,8 @@ aws_instance.foo["a"]: func TestContext2Apply_provisionerResourceRef(t *testing.T) { m := testModule(t, "apply-provisioner-resource-ref") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn pr := testProvisioner() pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { @@ -5060,7 +5056,7 @@ func TestContext2Apply_provisionerResourceRef(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -5090,8 +5086,8 @@ func TestContext2Apply_provisionerSelfRef(t *testing.T) { m := testModule(t, "apply-provisioner-self-ref") p := testProvider("aws") pr := testProvisioner() - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { val := req.Config.GetAttr("command") if val.AsString() != "bar" { @@ -5106,7 +5102,7 @@ func 
TestContext2Apply_provisionerSelfRef(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -5139,8 +5135,8 @@ func TestContext2Apply_provisionerMultiSelfRef(t *testing.T) { m := testModule(t, "apply-provisioner-multi-self-ref") p := testProvider("aws") pr := testProvisioner() - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { lock.Lock() defer lock.Unlock() @@ -5159,7 +5155,7 @@ func TestContext2Apply_provisionerMultiSelfRef(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -5199,8 +5195,8 @@ func TestContext2Apply_provisionerMultiSelfRefSingle(t *testing.T) { m := testModule(t, "apply-provisioner-multi-self-ref-single") p := testProvider("aws") pr := testProvisioner() - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { lock.Lock() defer lock.Unlock() @@ -5219,7 +5215,7 @@ func TestContext2Apply_provisionerMultiSelfRefSingle(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -5256,7 +5252,6 @@ func TestContext2Apply_provisionerExplicitSelfRef(t *testing.T) 
{ m := testModule(t, "apply-provisioner-explicit-self-ref") p := testProvider("aws") pr := testProvisioner() - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { val := req.Config.GetAttr("command") @@ -5274,7 +5269,7 @@ func TestContext2Apply_provisionerExplicitSelfRef(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -5303,7 +5298,7 @@ func TestContext2Apply_provisionerExplicitSelfRef(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -5326,7 +5321,6 @@ func TestContext2Apply_provisionerForEachSelfRef(t *testing.T) { m := testModule(t, "apply-provisioner-for-each-self") p := testProvider("aws") pr := testProvisioner() - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { @@ -5343,7 +5337,7 @@ func TestContext2Apply_provisionerForEachSelfRef(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -5363,14 +5357,14 @@ func TestContext2Apply_Provisioner_Diff(t *testing.T) { m := testModule(t, "apply-provisioner-diff") p := testProvider("aws") pr := testProvisioner() - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn 
+ p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -5418,7 +5412,7 @@ func TestContext2Apply_Provisioner_Diff(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, State: state, @@ -5469,7 +5463,6 @@ func TestContext2Apply_outputDiffVars(t *testing.T) { State: state, }) - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn //func(info *InstanceInfo, s *InstanceState, rc *ResourceConfig) (*InstanceDiff, error) { // d := &InstanceDiff{ @@ -5512,7 +5505,6 @@ func TestContext2Apply_destroyX(t *testing.T) { m := testModule(t, "apply-destroy") h := new(HookRecordApplyOrder) p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -5572,7 +5564,6 @@ func TestContext2Apply_destroyOrder(t *testing.T) { m := testModule(t, "apply-destroy") h := new(HookRecordApplyOrder) p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -5635,7 +5626,6 @@ func TestContext2Apply_destroyModulePrefix(t *testing.T) { m := testModule(t, "apply-destroy-module-resource-prefix") h := new(MockHook) p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -5676,7 +5666,7 @@ func TestContext2Apply_destroyModulePrefix(t *testing.T) { t.Fatalf("plan errors: %s", diags.Err()) } - state, diags = ctx.Apply() 
+ _, diags = ctx.Apply() if diags.HasErrors() { t.Fatalf("diags: %s", diags.Err()) } @@ -5690,7 +5680,6 @@ func TestContext2Apply_destroyModulePrefix(t *testing.T) { func TestContext2Apply_destroyNestedModule(t *testing.T) { m := testModule(t, "apply-destroy-nested-module") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() @@ -5732,7 +5721,6 @@ func TestContext2Apply_destroyNestedModule(t *testing.T) { func TestContext2Apply_destroyDeeplyNestedModule(t *testing.T) { m := testModule(t, "apply-destroy-deeply-nested-module") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() @@ -5774,7 +5762,6 @@ func TestContext2Apply_destroyDeeplyNestedModule(t *testing.T) { func TestContext2Apply_destroyModuleWithAttrsReferencingResource(t *testing.T) { m, snap := testModuleWithSnapshot(t, "apply-destroy-module-with-attrs") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn var state *states.State @@ -5855,7 +5842,6 @@ func TestContext2Apply_destroyModuleWithAttrsReferencingResource(t *testing.T) { func TestContext2Apply_destroyWithModuleVariableAndCount(t *testing.T) { m, snap := testModuleWithSnapshot(t, "apply-destroy-mod-var-and-count") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn var state *states.State @@ -5932,7 +5918,6 @@ func TestContext2Apply_destroyWithModuleVariableAndCount(t *testing.T) { func TestContext2Apply_destroyTargetWithModuleVariableAndCount(t *testing.T) { m := testModule(t, "apply-destroy-mod-var-and-count") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn var state *states.State @@ -6011,7 +5996,6 @@ func TestContext2Apply_destroyTargetWithModuleVariableAndCount(t *testing.T) { func TestContext2Apply_destroyWithModuleVariableAndCountNested(t 
*testing.T) { m, snap := testModuleWithSnapshot(t, "apply-destroy-mod-var-and-count-nested") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn var state *states.State @@ -6088,7 +6072,6 @@ func TestContext2Apply_destroyWithModuleVariableAndCountNested(t *testing.T) { func TestContext2Apply_destroyOutputs(t *testing.T) { m := testModule(t, "apply-destroy-outputs") p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { @@ -6182,7 +6165,6 @@ func TestContext2Apply_destroyOrphan(t *testing.T) { State: state, }) - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn if _, diags := ctx.Plan(); diags.HasErrors() { @@ -6204,7 +6186,6 @@ func TestContext2Apply_destroyTaintedProvisioner(t *testing.T) { m := testModule(t, "apply-destroy-provisioner") p := testProvider("aws") pr := testProvisioner() - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() @@ -6223,7 +6204,7 @@ func TestContext2Apply_destroyTaintedProvisioner(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, State: state, @@ -6294,7 +6275,7 @@ func TestContext2Apply_errorDestroy(t *testing.T) { m := testModule(t, "empty") p := testProvider("test") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test_thing": { Attributes: map[string]*configschema.Attribute{ @@ -6302,7 +6283,7 @@ func TestContext2Apply_errorDestroy(t *testing.T) { }, }, }, - } + }) p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) 
providers.PlanResourceChangeResponse { // Should actually be called for this test, because Terraform Core // constructs the plan for a destroy operation itself. @@ -6367,7 +6348,7 @@ func TestContext2Apply_errorCreateInvalidNew(t *testing.T) { m := testModule(t, "apply-error") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -6376,7 +6357,7 @@ func TestContext2Apply_errorCreateInvalidNew(t *testing.T) { }, }, }, - } + }) p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { return providers.PlanResourceChangeResponse{ PlannedState: req.ProposedNewState, @@ -6431,7 +6412,7 @@ func TestContext2Apply_errorUpdateNullNew(t *testing.T) { m := testModule(t, "apply-error") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -6440,7 +6421,7 @@ func TestContext2Apply_errorUpdateNullNew(t *testing.T) { }, }, }, - } + }) p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { return providers.PlanResourceChangeResponse{ PlannedState: req.ProposedNewState, @@ -6574,7 +6555,6 @@ func TestContext2Apply_hook(t *testing.T) { m := testModule(t, "apply-good") h := new(MockHook) p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -6607,7 +6587,6 @@ func TestContext2Apply_hookOrphan(t *testing.T) { m := testModule(t, "apply-blank") h := new(MockHook) p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := 
states.NewState() @@ -6659,8 +6638,8 @@ func TestContext2Apply_idAttr(t *testing.T) { }, }) - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn if _, diags := ctx.Plan(); diags.HasErrors() { t.Fatalf("plan errors: %s", diags.Err()) @@ -6689,8 +6668,8 @@ func TestContext2Apply_idAttr(t *testing.T) { func TestContext2Apply_outputBasic(t *testing.T) { m := testModule(t, "apply-output") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -6767,8 +6746,8 @@ func TestContext2Apply_outputAdd(t *testing.T) { func TestContext2Apply_outputList(t *testing.T) { m := testModule(t, "apply-output-list") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -6795,8 +6774,8 @@ func TestContext2Apply_outputList(t *testing.T) { func TestContext2Apply_outputMulti(t *testing.T) { m := testModule(t, "apply-output-multi") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -6823,8 +6802,8 @@ func TestContext2Apply_outputMulti(t *testing.T) { func TestContext2Apply_outputMultiIndex(t *testing.T) { m := testModule(t, "apply-output-multi-index") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -6912,8 +6891,8 @@ func TestContext2Apply_taintX(t *testing.T) { func 
TestContext2Apply_taintDep(t *testing.T) { m := testModule(t, "apply-taint-dep") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -6964,8 +6943,8 @@ func TestContext2Apply_taintDep(t *testing.T) { func TestContext2Apply_taintDepRequiresNew(t *testing.T) { m := testModule(t, "apply-taint-dep-requires-new") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -7016,8 +6995,8 @@ func TestContext2Apply_taintDepRequiresNew(t *testing.T) { func TestContext2Apply_targeted(t *testing.T) { m := testModule(t, "apply-targeted") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -7056,8 +7035,8 @@ aws_instance.foo: func TestContext2Apply_targetedCount(t *testing.T) { m := testModule(t, "apply-targeted-count") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -7098,8 +7077,8 @@ aws_instance.foo.2: func TestContext2Apply_targetedCountIndex(t *testing.T) { m := testModule(t, "apply-targeted-count") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -7132,7 +7111,6 @@ aws_instance.foo.1: func TestContext2Apply_targetedDestroy(t *testing.T) { m := testModule(t, "destroy-targeted") p 
:= testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() @@ -7214,7 +7192,6 @@ func TestContext2Apply_targetedDestroy(t *testing.T) { func TestContext2Apply_targetedDestroyCountDeps(t *testing.T) { m := testModule(t, "apply-destroy-targeted-count") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() @@ -7267,7 +7244,6 @@ func TestContext2Apply_targetedDestroyCountDeps(t *testing.T) { func TestContext2Apply_targetedDestroyModule(t *testing.T) { m := testModule(t, "apply-targeted-module") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() @@ -7347,7 +7323,6 @@ module.child: func TestContext2Apply_targetedDestroyCountIndex(t *testing.T) { m := testModule(t, "apply-targeted-count") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn foo := &states.ResourceInstanceObjectSrc{ @@ -7437,8 +7412,8 @@ aws_instance.foo.1: func TestContext2Apply_targetedModule(t *testing.T) { m := testModule(t, "apply-targeted-module") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -7486,8 +7461,8 @@ module.child: func TestContext2Apply_targetedModuleDep(t *testing.T) { m := testModule(t, "apply-targeted-module-dep") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -7538,8 +7513,8 @@ module.child: func TestContext2Apply_targetedModuleUnrelatedOutputs(t *testing.T) { m := testModule(t, "apply-targeted-module-unrelated-outputs") p := 
testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn state := states.NewState() _ = state.EnsureModule(addrs.RootModuleInstance.Child("child2", addrs.NoKey)) @@ -7589,8 +7564,8 @@ module.child2: func TestContext2Apply_targetedModuleResource(t *testing.T) { m := testModule(t, "apply-targeted-module-resource") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -7631,7 +7606,6 @@ module.child: func TestContext2Apply_targetedResourceOrphanModule(t *testing.T) { m := testModule(t, "apply-targeted-resource-orphan-module") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() @@ -7670,7 +7644,6 @@ func TestContext2Apply_targetedResourceOrphanModule(t *testing.T) { func TestContext2Apply_unknownAttribute(t *testing.T) { m := testModule(t, "apply-unknown") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { resp = testDiffFn(req) planned := resp.PlannedState.AsValueMap() @@ -7678,8 +7651,9 @@ func TestContext2Apply_unknownAttribute(t *testing.T) { resp.PlannedState = cty.ObjectVal(planned) return resp } + p.ApplyResourceChangeFn = testApplyFn - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -7690,7 +7664,7 @@ func TestContext2Apply_unknownAttribute(t *testing.T) { }, }, }, - } + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -7718,7 +7692,6 @@ func TestContext2Apply_unknownAttribute(t *testing.T) { func 
TestContext2Apply_unknownAttributeInterpolate(t *testing.T) { m := testModule(t, "apply-unknown-interpolate") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -7836,8 +7809,8 @@ func TestContext2Apply_createBefore_depends(t *testing.T) { m := testModule(t, "apply-depends-create-before") h := new(HookRecordApplyOrder) p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) root.SetResourceInstanceCurrent( @@ -8025,7 +7998,7 @@ func TestContext2Apply_singleDestroy(t *testing.T) { } h.Active = true - state, diags := ctx.Apply() + _, diags := ctx.Apply() if diags.HasErrors() { t.Fatalf("diags: %s", diags.Err()) } @@ -8038,9 +8011,8 @@ func TestContext2Apply_singleDestroy(t *testing.T) { // GH-7824 func TestContext2Apply_issue7824(t *testing.T) { p := testProvider("template") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "template_file": { Attributes: map[string]*configschema.Attribute{ @@ -8049,7 +8021,7 @@ func TestContext2Apply_issue7824(t *testing.T) { }, }, }, - } + }) m, snap := testModuleWithSnapshot(t, "issue-7824") @@ -8094,9 +8066,9 @@ func TestContext2Apply_issue5254(t *testing.T) { // Create a provider. We use "template" here just to match the repro // we got from the issue itself. 
p := testProvider("template") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.ApplyResourceChangeFn = testApplyFn + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "template_file": { Attributes: map[string]*configschema.Attribute{ @@ -8107,7 +8079,7 @@ func TestContext2Apply_issue5254(t *testing.T) { }, }, }, - } + }) // Apply cleanly step 0 ctx := testContext2(t, &ContextOpts{ @@ -8117,7 +8089,7 @@ func TestContext2Apply_issue5254(t *testing.T) { }, }) - plan, diags := ctx.Plan() + _, diags := ctx.Plan() if diags.HasErrors() { t.Fatalf("err: %s", diags.Err()) } @@ -8138,7 +8110,7 @@ func TestContext2Apply_issue5254(t *testing.T) { }, }) - plan, diags = ctx.Plan() + plan, diags := ctx.Plan() if diags.HasErrors() { t.Fatalf("err: %s", diags.Err()) } @@ -8260,10 +8232,10 @@ aws_instance.ifailedprovisioners: (tainted) func TestContext2Apply_ignoreChangesCreate(t *testing.T) { m := testModule(t, "apply-ignore-changes-create") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn - instanceSchema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + instanceSchema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block instanceSchema.Attributes["required_field"] = &configschema.Attribute{ Type: cty.String, Required: true, @@ -8309,7 +8281,6 @@ aws_instance.foo: func TestContext2Apply_ignoreChangesWithDep(t *testing.T) { m := testModule(t, "apply-ignore-changes-dep") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { resp.PlannedState = req.ProposedNewState @@ -8404,10 +8375,10 @@ func TestContext2Apply_ignoreChangesWithDep(t *testing.T) { func TestContext2Apply_ignoreChangesWildcard(t *testing.T) { m := 
testModule(t, "apply-ignore-changes-wildcard") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn - instanceSchema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + instanceSchema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block instanceSchema.Attributes["required_field"] = &configschema.Attribute{ Type: cty.String, Required: true, @@ -8453,7 +8424,6 @@ aws_instance.foo: func TestContext2Apply_destroyNestedModuleWithAttrsReferencingResource(t *testing.T) { m, snap := testModuleWithSnapshot(t, "apply-destroy-nested-module-with-attrs") p := testProvider("null") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn var state *states.State @@ -8652,7 +8622,6 @@ resource "null_instance" "depends" { func TestContext2Apply_terraformWorkspace(t *testing.T) { m := testModule(t, "apply-terraform-workspace") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ @@ -8683,7 +8652,6 @@ func TestContext2Apply_terraformWorkspace(t *testing.T) { func TestContext2Apply_multiRef(t *testing.T) { m := testModule(t, "apply-multi-ref") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -8710,8 +8678,8 @@ func TestContext2Apply_multiRef(t *testing.T) { func TestContext2Apply_targetedModuleRecursive(t *testing.T) { m := testModule(t, "apply-targeted-module-recursive") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -8784,7 +8752,6 @@ result_3 = hello world func TestContext2Apply_destroyWithLocals(t *testing.T) { m := testModule(t, "apply-destroy-with-locals") p := testProvider("aws") - 
p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() @@ -8840,7 +8807,6 @@ func TestContext2Apply_providerWithLocals(t *testing.T) { } p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -8887,7 +8853,6 @@ func TestContext2Apply_providerWithLocals(t *testing.T) { func TestContext2Apply_destroyWithProviders(t *testing.T) { m := testModule(t, "destroy-module-with-provider") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() @@ -9045,7 +9010,6 @@ func TestContext2Apply_plannedInterpolatedCount(t *testing.T) { m, snap := testModuleWithSnapshot(t, "apply-interpolated-count") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn Providers := map[addrs.Provider]providers.Factory{ @@ -9099,7 +9063,6 @@ func TestContext2Apply_plannedDestroyInterpolatedCount(t *testing.T) { m, snap := testModuleWithSnapshot(t, "plan-destroy-interpolated-count") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn providers := map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), @@ -9165,7 +9128,6 @@ func TestContext2Apply_scaleInMultivarRef(t *testing.T) { m := testModule(t, "apply-resource-scale-in") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn Providers := map[addrs.Provider]providers.Factory{ @@ -9214,7 +9176,7 @@ func TestContext2Apply_scaleInMultivarRef(t *testing.T) { func TestContext2Apply_inconsistentWithPlan(t *testing.T) { m := testModule(t, "apply-inconsistent-with-plan") p := testProvider("test") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: 
map[string]*configschema.Block{ "test": { Attributes: map[string]*configschema.Attribute{ @@ -9222,7 +9184,7 @@ func TestContext2Apply_inconsistentWithPlan(t *testing.T) { }, }, }, - } + }) p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { return providers.PlanResourceChangeResponse{ PlannedState: cty.ObjectVal(map[string]cty.Value{ @@ -9267,7 +9229,7 @@ func TestContext2Apply_inconsistentWithPlan(t *testing.T) { func TestContext2Apply_issue19908(t *testing.T) { m := testModule(t, "apply-issue19908") p := testProvider("test") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test": { Attributes: map[string]*configschema.Attribute{ @@ -9275,7 +9237,7 @@ func TestContext2Apply_issue19908(t *testing.T) { }, }, }, - } + }) p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { return providers.PlanResourceChangeResponse{ PlannedState: req.ProposedNewState, @@ -9348,7 +9310,7 @@ func TestContext2Apply_issue19908(t *testing.T) { func TestContext2Apply_invalidIndexRef(t *testing.T) { p := testProvider("test") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test_instance": { Attributes: map[string]*configschema.Attribute{ @@ -9356,7 +9318,7 @@ func TestContext2Apply_invalidIndexRef(t *testing.T) { }, }, }, - } + }) p.PlanResourceChangeFn = testDiffFn m := testModule(t, "apply-invalid-index") @@ -9397,7 +9359,6 @@ func TestContext2Apply_moduleReplaceCycle(t *testing.T) { p := testProvider("aws") p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn instanceSchema := &configschema.Block{ Attributes: map[string]*configschema.Attribute{ @@ -9406,11 +9367,11 @@ func TestContext2Apply_moduleReplaceCycle(t *testing.T) { 
}, } - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": instanceSchema, }, - } + }) state := states.NewState() modA := state.EnsureModule(addrs.RootModuleInstance.Child("a", addrs.NoKey)) @@ -9535,8 +9496,18 @@ func TestContext2Apply_moduleReplaceCycle(t *testing.T) { func TestContext2Apply_destroyDataCycle(t *testing.T) { m, snap := testModuleWithSnapshot(t, "apply-destroy-data-cycle") p := testProvider("null") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("new"), + "foo": cty.NullVal(cty.String), + }), + } + } + + tp := testProvider("test") + tp.PlanResourceChangeFn = testDiffFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -9555,6 +9526,31 @@ func TestContext2Apply_destroyDataCycle(t *testing.T) { Module: addrs.RootModule, }, ) + root.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "a", + }.Instance(addrs.IntKey(0)), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a"}`), + Dependencies: []addrs.ConfigResource{ + addrs.ConfigResource{ + Resource: addrs.Resource{ + Mode: addrs.DataResourceMode, + Type: "null_data_source", + Name: "d", + }, + Module: addrs.RootModule, + }, + }, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) root.SetResourceInstanceCurrent( addrs.Resource{ Mode: addrs.DataResourceMode, @@ -9563,7 +9559,7 @@ func TestContext2Apply_destroyDataCycle(t *testing.T) { }.Instance(addrs.NoKey), &states.ResourceInstanceObjectSrc{ Status: states.ObjectReady, - AttrsJSON: 
[]byte(`{"id":"data"}`), + AttrsJSON: []byte(`{"id":"old"}`), }, addrs.AbsProviderConfig{ Provider: addrs.NewDefaultProvider("null"), @@ -9573,15 +9569,14 @@ func TestContext2Apply_destroyDataCycle(t *testing.T) { Providers := map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + addrs.NewDefaultProvider("test"): testProviderFuncFixed(tp), } - hook := &testHook{} ctx := testContext2(t, &ContextOpts{ Config: m, Providers: Providers, State: state, Destroy: true, - Hooks: []Hook{hook}, }) plan, diags := ctx.Plan() @@ -9603,6 +9598,19 @@ func TestContext2Apply_destroyDataCycle(t *testing.T) { t.Fatalf("failed to create context for plan: %s", diags.Err()) } + tp.ConfigureFn = func(req providers.ConfigureRequest) (resp providers.ConfigureResponse) { + foo := req.Config.GetAttr("foo") + if !foo.IsKnown() { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("unknown config value foo")) + return resp + } + + if foo.AsString() != "new" { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("wrong config value: %q", foo.AsString())) + } + return resp + } + _, diags = ctx.Apply() if diags.HasErrors() { t.Fatalf("diags: %s", diags.Err()) @@ -9632,7 +9640,7 @@ func TestContext2Apply_taintedDestroyFailure(t *testing.T) { return testApplyFn(req) } - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test_instance": { Attributes: map[string]*configschema.Attribute{ @@ -9647,7 +9655,7 @@ func TestContext2Apply_taintedDestroyFailure(t *testing.T) { }, }, }, - } + }) state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -9793,22 +9801,25 @@ func TestContext2Apply_plannedConnectionRefs(t *testing.T) { return resp } - pr := testProvisioner() - pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { - host := 
req.Connection.GetAttr("host") - if host.IsNull() || !host.IsKnown() { - resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("invalid host value: %#v", host)) - } + provisionerFactory := func() (provisioners.Interface, error) { + pr := testProvisioner() + pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + host := req.Connection.GetAttr("host") + if host.IsNull() || !host.IsKnown() { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("invalid host value: %#v", host)) + } - return resp + return resp + } + return pr, nil } Providers := map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), } - provisioners := map[string]ProvisionerFactory{ - "shell": testProvisionerFuncFixed(pr), + provisioners := map[string]provisioners.Factory{ + "shell": provisionerFactory, } hook := &testHook{} @@ -9834,7 +9845,6 @@ func TestContext2Apply_plannedConnectionRefs(t *testing.T) { func TestContext2Apply_cbdCycle(t *testing.T) { m, snap := testModuleWithSnapshot(t, "apply-cbd-cycle") p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn state := states.NewState() @@ -9954,7 +9964,7 @@ func TestContext2Apply_ProviderMeta_apply_set(t *testing.T) { m := testModule(t, "provider-meta-set") p := testProvider("test") p.PlanResourceChangeFn = testDiffFn - schema := p.GetSchemaReturn + schema := p.ProviderSchema() schema.ProviderMeta = &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "baz": { @@ -9981,7 +9991,7 @@ func TestContext2Apply_ProviderMeta_apply_set(t *testing.T) { NewState: cty.ObjectVal(s), } } - p.GetSchemaReturn = schema + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(schema) ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -10037,7 +10047,7 @@ func TestContext2Apply_ProviderMeta_apply_unset(t *testing.T) { m := testModule(t, 
"provider-meta-unset") p := testProvider("test") p.PlanResourceChangeFn = testDiffFn - schema := p.GetSchemaReturn + schema := p.ProviderSchema() schema.ProviderMeta = &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "baz": { @@ -10062,7 +10072,7 @@ func TestContext2Apply_ProviderMeta_apply_unset(t *testing.T) { NewState: cty.ObjectVal(s), } } - p.GetSchemaReturn = schema + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(schema) ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -10096,8 +10106,7 @@ func TestContext2Apply_ProviderMeta_apply_unset(t *testing.T) { func TestContext2Apply_ProviderMeta_plan_set(t *testing.T) { m := testModule(t, "provider-meta-set") p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn - schema := p.GetSchemaReturn + schema := p.ProviderSchema() schema.ProviderMeta = &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "baz": { @@ -10113,7 +10122,7 @@ func TestContext2Apply_ProviderMeta_plan_set(t *testing.T) { PlannedState: req.ProposedNewState, } } - p.GetSchemaReturn = schema + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(schema) ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -10165,8 +10174,7 @@ func TestContext2Apply_ProviderMeta_plan_set(t *testing.T) { func TestContext2Apply_ProviderMeta_plan_unset(t *testing.T) { m := testModule(t, "provider-meta-unset") p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn - schema := p.GetSchemaReturn + schema := p.ProviderSchema() schema.ProviderMeta = &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "baz": { @@ -10182,7 +10190,7 @@ func TestContext2Apply_ProviderMeta_plan_unset(t *testing.T) { PlannedState: req.ProposedNewState, } } - p.GetSchemaReturn = schema + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(schema) ctx := testContext2(t, &ContextOpts{ Config: m, Providers: 
map[addrs.Provider]providers.Factory{ @@ -10213,7 +10221,6 @@ func TestContext2Apply_ProviderMeta_plan_unset(t *testing.T) { func TestContext2Apply_ProviderMeta_plan_setNoSchema(t *testing.T) { m := testModule(t, "provider-meta-set") p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -10253,9 +10260,8 @@ func TestContext2Apply_ProviderMeta_plan_setNoSchema(t *testing.T) { func TestContext2Apply_ProviderMeta_plan_setInvalid(t *testing.T) { m := testModule(t, "provider-meta-set") p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn - schema := p.GetSchemaReturn + schema := p.ProviderSchema() schema.ProviderMeta = &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "quux": { @@ -10264,7 +10270,7 @@ func TestContext2Apply_ProviderMeta_plan_setInvalid(t *testing.T) { }, }, } - p.GetSchemaReturn = schema + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(schema) ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -10307,9 +10313,8 @@ func TestContext2Apply_ProviderMeta_plan_setInvalid(t *testing.T) { func TestContext2Apply_ProviderMeta_refresh_set(t *testing.T) { m := testModule(t, "provider-meta-set") p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn - schema := p.GetSchemaReturn + schema := p.ProviderSchema() schema.ProviderMeta = &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "baz": { @@ -10319,17 +10324,16 @@ func TestContext2Apply_ProviderMeta_refresh_set(t *testing.T) { }, } rrcPMs := map[string]cty.Value{} - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { rrcPMs[req.TypeName] = req.ProviderMeta - newState, err := 
p.GetSchemaReturn.ResourceTypes[req.TypeName].CoerceValue(p.ReadResourceResponse.NewState) + newState, err := p.GetSchemaResponse.ResourceTypes[req.TypeName].Block.CoerceValue(req.PriorState) if err != nil { panic(err) } - resp := p.ReadResourceResponse resp.NewState = newState return resp } - p.GetSchemaReturn = schema + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(schema) ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -10387,11 +10391,10 @@ func TestContext2Apply_ProviderMeta_refresh_set(t *testing.T) { func TestContext2Apply_ProviderMeta_refresh_setNoSchema(t *testing.T) { m := testModule(t, "provider-meta-set") p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn // we need a schema for plan/apply so they don't error - schema := p.GetSchemaReturn + schema := p.ProviderSchema() schema.ProviderMeta = &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "baz": { @@ -10400,7 +10403,7 @@ func TestContext2Apply_ProviderMeta_refresh_setNoSchema(t *testing.T) { }, }, } - p.GetSchemaReturn = schema + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(schema) ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -10416,7 +10419,7 @@ func TestContext2Apply_ProviderMeta_refresh_setNoSchema(t *testing.T) { // drop the schema before refresh, to test that it errors schema.ProviderMeta = nil - p.GetSchemaReturn = schema + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(schema) ctx = testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -10456,11 +10459,10 @@ func TestContext2Apply_ProviderMeta_refresh_setNoSchema(t *testing.T) { func TestContext2Apply_ProviderMeta_refresh_setInvalid(t *testing.T) { m := testModule(t, "provider-meta-set") p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn // we need 
a matching schema for plan/apply so they don't error - schema := p.GetSchemaReturn + schema := p.ProviderSchema() schema.ProviderMeta = &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "baz": { @@ -10469,7 +10471,7 @@ func TestContext2Apply_ProviderMeta_refresh_setInvalid(t *testing.T) { }, }, } - p.GetSchemaReturn = schema + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(schema) ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -10492,7 +10494,7 @@ func TestContext2Apply_ProviderMeta_refresh_setInvalid(t *testing.T) { }, }, } - p.GetSchemaReturn = schema + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(schema) ctx = testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -10536,9 +10538,8 @@ func TestContext2Apply_ProviderMeta_refresh_setInvalid(t *testing.T) { func TestContext2Apply_ProviderMeta_refreshdata_set(t *testing.T) { m := testModule(t, "provider-meta-data-set") p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn - schema := p.GetSchemaReturn + schema := p.ProviderSchema() schema.ProviderMeta = &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "baz": { @@ -10547,7 +10548,7 @@ func TestContext2Apply_ProviderMeta_refreshdata_set(t *testing.T) { }, }, } - p.GetSchemaReturn = schema + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(schema) ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -10632,9 +10633,8 @@ func TestContext2Apply_ProviderMeta_refreshdata_set(t *testing.T) { func TestContext2Apply_ProviderMeta_refreshdata_unset(t *testing.T) { m := testModule(t, "provider-meta-data-unset") p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn - schema := p.GetSchemaReturn + schema := p.ProviderSchema() schema.ProviderMeta = &configschema.Block{ Attributes: 
map[string]*configschema.Attribute{ "baz": { @@ -10643,7 +10643,7 @@ func TestContext2Apply_ProviderMeta_refreshdata_unset(t *testing.T) { }, }, } - p.GetSchemaReturn = schema + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(schema) ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -10701,7 +10701,6 @@ func TestContext2Apply_ProviderMeta_refreshdata_unset(t *testing.T) { func TestContext2Apply_ProviderMeta_refreshdata_setNoSchema(t *testing.T) { m := testModule(t, "provider-meta-data-set") p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -10709,7 +10708,7 @@ func TestContext2Apply_ProviderMeta_refreshdata_setNoSchema(t *testing.T) { addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), }, }) - p.ReadDataSourceResponse = providers.ReadDataSourceResponse{ + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ State: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yo"), "foo": cty.StringVal("bar"), @@ -10747,9 +10746,8 @@ func TestContext2Apply_ProviderMeta_refreshdata_setNoSchema(t *testing.T) { func TestContext2Apply_ProviderMeta_refreshdata_setInvalid(t *testing.T) { m := testModule(t, "provider-meta-data-set") p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn - schema := p.GetSchemaReturn + schema := p.ProviderSchema() schema.ProviderMeta = &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "quux": { @@ -10758,14 +10756,14 @@ func TestContext2Apply_ProviderMeta_refreshdata_setInvalid(t *testing.T) { }, }, } - p.GetSchemaReturn = schema + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(schema) ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), }, }) - p.ReadDataSourceResponse = 
providers.ReadDataSourceResponse{ + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ State: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yo"), "foo": cty.StringVal("bar"), @@ -10834,8 +10832,8 @@ output "out" { }) p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -10895,7 +10893,6 @@ resource "aws_instance" "cbd" { }) p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -11027,7 +11024,6 @@ output "c" { }`}) p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -11096,7 +11092,6 @@ output "myoutput" { `}) p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -11220,7 +11215,6 @@ locals { ) p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = func(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { n := r.ProposedNewState.AsValueMap() @@ -11384,7 +11378,7 @@ output "output" { testP.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { return providers.ReadResourceResponse{NewState: req.PriorState} } - testP.GetSchemaReturn = schemaFn("test") + testP.GetSchemaResponse = getSchemaResponseFromProviderSchema(schemaFn("test")) providerConfig := "" testP.ConfigureFn = func(req providers.ConfigureRequest) (resp providers.ConfigureResponse) { @@ -11409,12 +11403,12 @@ output "output" { nullP.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { return providers.ReadResourceResponse{NewState: req.PriorState} } - 
nullP.GetSchemaReturn = schemaFn("null") + nullP.GetSchemaResponse = getSchemaResponseFromProviderSchema(schemaFn("null")) nullP.ApplyResourceChangeFn = testApplyFn nullP.PlanResourceChangeFn = testDiffFn - nullP.ReadDataSourceResponse = providers.ReadDataSourceResponse{ + nullP.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ State: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("ID"), "output": cty.StringVal("valid"), @@ -11510,7 +11504,6 @@ output "outputs" { `}) p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { return providers.ReadDataSourceResponse{ @@ -11590,7 +11583,6 @@ resource "test_resource" "a" { `}) p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { proposed := req.ProposedNewState.AsValueMap() proposed["id"] = cty.UnknownVal(cty.String) @@ -11662,7 +11654,6 @@ resource "test_instance" "b" { `}) p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ @@ -11725,7 +11716,6 @@ resource "test_resource" "c" { `}) p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ @@ -11794,8 +11784,39 @@ resource "test_resource" "foo" { }`, }) - p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn + p := new(MockProvider) + p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { + return providers.ReadResourceResponse{NewState: req.PriorState} + } + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ + Provider: &configschema.Block{}, + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: 
map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "value": { + Type: cty.String, + Optional: true, + Computed: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "network_interface": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "network_interface_id": {Type: cty.String, Optional: true}, + "device_index": {Type: cty.Number, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + }, + }) p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ @@ -11874,7 +11895,6 @@ resource "test_resource" "foo" { }) p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ @@ -11943,7 +11963,6 @@ resource "test_resource" "baz" { }) p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ @@ -12008,7 +12027,6 @@ resource "test_resource" "foo" { }) p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ @@ -12046,6 +12064,9 @@ resource "test_resource" "foo" { fooState := state.ResourceInstance(addr) + if len(fooState.Current.AttrSensitivePaths) != 1 { + t.Fatalf("wrong number of sensitive paths, expected 1, got, %v", len(fooState.Current.AttrSensitivePaths)) + } got := fooState.Current.AttrSensitivePaths[0] want := cty.PathValueMarks{ Path: cty.GetAttrPath("value"), @@ -12145,6 +12166,7 @@ output "out" { func TestContext2Apply_provisionerSensitive(t *testing.T) { m := testModule(t, "apply-provisioner-sensitive") p := testProvider("aws") + pr := testProvisioner() pr.ProvisionResourceFn = func(req provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { if req.Config.ContainsMarked() { @@ -12157,8 +12179,8 @@ func TestContext2Apply_provisionerSensitive(t *testing.T) { 
req.UIOutput.Output(fmt.Sprintf("Executing: %q", command.AsString())) return } - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = testApplyFn h := new(MockHook) ctx := testContext2(t, &ContextOpts{ @@ -12167,7 +12189,7 @@ func TestContext2Apply_provisionerSensitive(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, Variables: InputValues{ @@ -12183,6 +12205,9 @@ func TestContext2Apply_provisionerSensitive(t *testing.T) { t.Fatal("plan failed") } + // "restart" provisioner + pr.CloseCalled = false + state, diags := ctx.Apply() if diags.HasErrors() { logDiagnostics(t, diags) @@ -12250,3 +12275,297 @@ resource "test_resource" "foo" { t.Fatal("missing 'test_resource.foo' in state:", state) } } + +func TestContext2Apply_rpcDiagnostics(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "a" { +} +`, + }) + + p := testProvider("test") + p.PlanResourceChangeFn = testDiffFn + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + resp = testApplyFn(req) + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.SimpleWarning("don't frobble")) + return resp + } + + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Config: m, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + _, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + _, diags = ctx.Apply() + if 
diags.HasErrors() { + t.Fatal(diags.Err()) + } + + if len(diags) == 0 { + t.Fatal("expected warnings") + } + + for _, d := range diags { + des := d.Description().Summary + if !strings.Contains(des, "frobble") { + t.Fatalf(`expected frobble, got %q`, des) + } + } +} + +func TestContext2Apply_dataSensitive(t *testing.T) { + m := testModule(t, "apply-data-sensitive") + p := testProvider("null") + p.PlanResourceChangeFn = testDiffFn + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + // add the required id + m := req.Config.AsValueMap() + m["id"] = cty.StringVal("foo") + + return providers.ReadDataSourceResponse{ + State: cty.ObjectVal(m), + } + } + + ctx := testContext2(t, &ContextOpts{ + Config: m, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("null"): testProviderFuncFixed(p), + }, + }) + + if p, diags := ctx.Plan(); diags.HasErrors() { + t.Fatalf("diags: %s", diags.Err()) + } else { + t.Logf(legacyDiffComparisonString(p.Changes)) + } + + state, diags := ctx.Apply() + assertNoErrors(t, diags) + + addr := mustResourceInstanceAddr("data.null_data_source.testing") + + dataSourceState := state.ResourceInstance(addr) + pvms := dataSourceState.Current.AttrSensitivePaths + if len(pvms) != 1 { + t.Fatalf("expected 1 sensitive path, got %d", len(pvms)) + } + pvm := pvms[0] + if gotPath, wantPath := pvm.Path, cty.GetAttrPath("foo"); !gotPath.Equals(wantPath) { + t.Errorf("wrong path\n got: %#v\nwant: %#v", gotPath, wantPath) + } + if gotMarks, wantMarks := pvm.Marks, cty.NewValueMarks("sensitive"); !gotMarks.Equal(wantMarks) { + t.Errorf("wrong marks\n got: %#v\nwant: %#v", gotMarks, wantMarks) + } +} + +func TestContext2Apply_errorRestorePrivateData(t *testing.T) { + // empty config to remove our resource + m := testModuleInline(t, map[string]string{ + "main.tf": "", + }) + + p := simpleMockProvider() + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{ + // we error during 
apply, which will trigger core to preserve the last + // known state, including private data + Diagnostics: tfdiags.Diagnostics(nil).Append(errors.New("oops")), + } + + addr := mustResourceInstanceAddr("test_object.a") + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + Private: []byte("private"), + }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) + }) + + ctx := testContext2(t, &ContextOpts{ + Config: m, + State: state, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + state, _ = ctx.Apply() + if string(state.ResourceInstance(addr).Current.Private) != "private" { + t.Fatal("missing private data in state") + } +} + +func TestContext2Apply_errorRestoreStatus(t *testing.T) { + // empty config to remove our resource + m := testModuleInline(t, map[string]string{ + "main.tf": "", + }) + + p := simpleMockProvider() + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + // We error during apply, but return the current object state. 
+ resp.Diagnostics = resp.Diagnostics.Append(errors.New("oops")) + // return a warning too to make sure it isn't dropped + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.SimpleWarning("warned")) + resp.NewState = req.PriorState + resp.Private = req.PlannedPrivate + return resp + } + + addr := mustResourceInstanceAddr("test_object.a") + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + Status: states.ObjectTainted, + AttrsJSON: []byte(`{"test_string":"foo"}`), + Private: []byte("private"), + }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) + }) + + ctx := testContext2(t, &ContextOpts{ + Config: m, + State: state, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + state, diags = ctx.Apply() + + errString := diags.ErrWithWarnings().Error() + if !strings.Contains(errString, "oops") || !strings.Contains(errString, "warned") { + t.Fatalf("error missing expected info: %q", errString) + } + + if len(diags) != 2 { + t.Fatalf("expected 1 error and 1 warning, got: %q", errString) + } + + res := state.ResourceInstance(addr) + if res == nil { + t.Fatal("resource was removed from state") + } + + if res.Current.Status != states.ObjectTainted { + t.Fatal("resource should still be tainted in the state") + } + + if string(res.Current.Private) != "private" { + t.Fatalf("incorrect private data, got %q", res.Current.Private) + } +} + +func TestContext2Apply_nonConformingResponse(t *testing.T) { + // empty config to remove our resource + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { + test_string = "x" +} +`, + }) + + p := simpleMockProvider() + respDiags := tfdiags.Diagnostics(nil).Append(tfdiags.SimpleWarning("warned")) + respDiags = respDiags.Append(errors.New("oops")) + 
p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{ + // Don't lose these diagnostics + Diagnostics: respDiags, + // This state is missing required attributes, and should produce an error + NewState: cty.ObjectVal(map[string]cty.Value{ + "test_string": cty.StringVal("x"), + }), + } + + ctx := testContext2(t, &ContextOpts{ + Config: m, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + _, diags = ctx.Apply() + errString := diags.ErrWithWarnings().Error() + if !strings.Contains(errString, "oops") || !strings.Contains(errString, "warned") { + t.Fatalf("error missing expected info: %q", errString) + } + + // we should have more than the ones returned from the provider, and they + // should not be coalesced into a single value + if len(diags) < 3 { + t.Fatalf("incorrect diagnostics, got %d values with %s", len(diags), diags.ErrWithWarnings()) + } +} + +func TestContext2Apply_nilResponse(t *testing.T) { + // empty config to remove our resource + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { +} +`, + }) + + p := simpleMockProvider() + p.ApplyResourceChangeResponse = &providers.ApplyResourceChangeResponse{} + + ctx := testContext2(t, &ContextOpts{ + Config: m, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + _, diags = ctx.Apply() + if !diags.HasErrors() { + t.Fatal("expected and error") + } + + errString := diags.ErrWithWarnings().Error() + if !strings.Contains(errString, "invalid nil value") { + t.Fatalf("error missing expected info: %q", errString) + } +} + +//////////////////////////////////////////////////////////////////////////////// +// NOTE: Due to the size of this file, new tests should be added 
to +// context_apply2_test.go. +//////////////////////////////////////////////////////////////////////////////// diff --git a/terraform/context_components.go b/terraform/context_components.go index c893a16b4..354337dd3 100644 --- a/terraform/context_components.go +++ b/terraform/context_components.go @@ -26,7 +26,7 @@ type contextComponentFactory interface { // basicComponentFactory just calls a factory from a map directly. type basicComponentFactory struct { providers map[addrs.Provider]providers.Factory - provisioners map[string]ProvisionerFactory + provisioners map[string]provisioners.Factory } func (c *basicComponentFactory) ResourceProviders() []string { diff --git a/terraform/context_components_test.go b/terraform/context_components_test.go index 28feebdc3..646b75509 100644 --- a/terraform/context_components_test.go +++ b/terraform/context_components_test.go @@ -32,7 +32,7 @@ func simpleMockComponentFactory() *basicComponentFactory { return provider, nil }, }, - provisioners: map[string]ProvisionerFactory{ + provisioners: map[string]provisioners.Factory{ "test": func() (provisioners.Interface, error) { return provisioner, nil }, @@ -62,19 +62,19 @@ func simpleTestSchema() *configschema.Block { Optional: true, }, "test_number": { - Type: cty.String, + Type: cty.Number, Optional: true, }, "test_bool": { - Type: cty.String, + Type: cty.Bool, Optional: true, }, "test_list": { - Type: cty.String, + Type: cty.List(cty.String), Optional: true, }, "test_map": { - Type: cty.String, + Type: cty.Map(cty.String), Optional: true, }, }, diff --git a/terraform/context_fixtures_test.go b/terraform/context_fixtures_test.go index 2ed264396..3b4ab0103 100644 --- a/terraform/context_fixtures_test.go +++ b/terraform/context_fixtures_test.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" "github.com/zclconf/go-cty/cty" 
) @@ -16,7 +17,7 @@ import ( type contextTestFixture struct { Config *configs.Config Providers map[addrs.Provider]providers.Factory - Provisioners map[string]ProvisionerFactory + Provisioners map[string]provisioners.Factory } // ContextOpts returns a ContextOps pre-populated with the elements of this diff --git a/terraform/context_import_test.go b/terraform/context_import_test.go index 01b9c2de2..6aa5892f2 100644 --- a/terraform/context_import_test.go +++ b/terraform/context_import_test.go @@ -22,10 +22,14 @@ func TestContextImport_basic(t *testing.T) { }, }) - p.ImportStateReturn = []*InstanceState{ - &InstanceState{ - ID: "foo", - Ephemeral: EphemeralState{Type: "aws_instance"}, + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, }, } @@ -59,10 +63,14 @@ func TestContextImport_countIndex(t *testing.T) { }, }) - p.ImportStateReturn = []*InstanceState{ - &InstanceState{ - ID: "foo", - Ephemeral: EphemeralState{Type: "aws_instance"}, + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, }, } @@ -117,10 +125,14 @@ func TestContextImport_collision(t *testing.T) { }), }) - p.ImportStateReturn = []*InstanceState{ - &InstanceState{ - ID: "foo", - Ephemeral: EphemeralState{Type: "aws_instance"}, + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, }, } @@ -152,9 +164,13 @@ func TestContextImport_missingType(t *testing.T) { p := testProvider("aws") m := testModule(t, "import-provider") - p.ImportStateReturn = 
[]*InstanceState{ - &InstanceState{ - ID: "foo", + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, }, } @@ -189,10 +205,14 @@ func TestContextImport_missingType(t *testing.T) { func TestContextImport_moduleProvider(t *testing.T) { p := testProvider("aws") - p.ImportStateReturn = []*InstanceState{ - &InstanceState{ - ID: "foo", - Ephemeral: EphemeralState{Type: "aws_instance"}, + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, }, } @@ -249,10 +269,14 @@ func TestContextImport_providerModule(t *testing.T) { }, }) - p.ImportStateReturn = []*InstanceState{ - &InstanceState{ - ID: "foo", - Ephemeral: EphemeralState{Type: "aws_instance"}, + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, }, } @@ -317,10 +341,14 @@ func TestContextImport_providerConfig(t *testing.T) { }, }) - p.ImportStateReturn = []*InstanceState{ - &InstanceState{ - ID: "foo", - Ephemeral: EphemeralState{Type: "aws_instance"}, + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, }, } @@ -368,10 +396,14 @@ func TestContextImport_providerConfigResources(t *testing.T) { }, }) - p.ImportStateReturn = []*InstanceState{ - &InstanceState{ - ID: "foo", - Ephemeral: EphemeralState{Type: "aws_instance"}, + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + 
ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, }, } @@ -403,16 +435,20 @@ func TestContextImport_refresh(t *testing.T) { }, }) - p.ImportStateReturn = []*InstanceState{ - &InstanceState{ - ID: "foo", - Ephemeral: EphemeralState{Type: "aws_instance"}, + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, }, } p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{ + p.ReadResourceResponse = &providers.ReadResourceResponse{ NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("foo"), "foo": cty.StringVal("bar"), @@ -450,10 +486,14 @@ func TestContextImport_refreshNil(t *testing.T) { }, }) - p.ImportStateReturn = []*InstanceState{ - &InstanceState{ - ID: "foo", - Ephemeral: EphemeralState{Type: "aws_instance"}, + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, }, } @@ -494,10 +534,14 @@ func TestContextImport_module(t *testing.T) { }, }) - p.ImportStateReturn = []*InstanceState{ - &InstanceState{ - ID: "foo", - Ephemeral: EphemeralState{Type: "aws_instance"}, + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, }, } @@ -532,10 +576,14 @@ func TestContextImport_moduleDepth2(t *testing.T) { }, }) - p.ImportStateReturn = []*InstanceState{ - &InstanceState{ - ID: "foo", - Ephemeral: EphemeralState{Type: "aws_instance"}, + p.ImportResourceStateResponse = 
&providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, }, } @@ -570,10 +618,14 @@ func TestContextImport_moduleDiff(t *testing.T) { }, }) - p.ImportStateReturn = []*InstanceState{ - &InstanceState{ - ID: "foo", - Ephemeral: EphemeralState{Type: "aws_instance"}, + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, }, } @@ -602,7 +654,7 @@ func TestContextImport_multiState(t *testing.T) { p := testProvider("aws") m := testModule(t, "import-provider") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "foo": {Type: cty.String, Optional: true}, @@ -620,16 +672,22 @@ func TestContextImport_multiState(t *testing.T) { }, }, }, - } + }) - p.ImportStateReturn = []*InstanceState{ - &InstanceState{ - ID: "foo", - Ephemeral: EphemeralState{Type: "aws_instance"}, - }, - &InstanceState{ - ID: "bar", - Ephemeral: EphemeralState{Type: "aws_instance_thing"}, + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + { + TypeName: "aws_instance_thing", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + }), + }, }, } @@ -665,7 +723,7 @@ func TestContextImport_multiStateSame(t *testing.T) { p := testProvider("aws") m := testModule(t, "import-provider") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{ 
Attributes: map[string]*configschema.Attribute{ "foo": {Type: cty.String, Optional: true}, @@ -683,20 +741,28 @@ func TestContextImport_multiStateSame(t *testing.T) { }, }, }, - } + }) - p.ImportStateReturn = []*InstanceState{ - &InstanceState{ - ID: "foo", - Ephemeral: EphemeralState{Type: "aws_instance"}, - }, - &InstanceState{ - ID: "bar", - Ephemeral: EphemeralState{Type: "aws_instance_thing"}, - }, - &InstanceState{ - ID: "qux", - Ephemeral: EphemeralState{Type: "aws_instance_thing"}, + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("foo"), + }), + }, + { + TypeName: "aws_instance_thing", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + }), + }, + { + TypeName: "aws_instance_thing", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("qux"), + }), + }, }, } @@ -763,7 +829,7 @@ resource "test_resource" "unused" { `, }) - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "foo": {Type: cty.String, Optional: true}, @@ -776,9 +842,19 @@ resource "test_resource" "unused" { }, }, }, - } + }) - p.ImportResourceStateResponse = providers.ImportResourceStateResponse{ + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "test_resource", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("test"), + }), + }, + }, + } + p.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ ImportedResources: []providers.ImportedResource{ { TypeName: "test_resource", @@ -845,17 +921,6 @@ module.child[0].nested: provider = provider["registry.terraform.io/hashicorp/aws"] ` -const testImportModuleExistingStr = ` - 
-module.foo: - aws_instance.bar: - ID = bar - provider = provider["registry.terraform.io/hashicorp/aws"] - aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] -` - const testImportMultiStr = ` aws_instance.foo: ID = foo diff --git a/terraform/context_input_test.go b/terraform/context_input_test.go index 1887ac97c..0356f8687 100644 --- a/terraform/context_input_test.go +++ b/terraform/context_input_test.go @@ -17,9 +17,7 @@ import ( func TestContext2Input_provider(t *testing.T) { m := testModule(t, "input-provider") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "foo": { @@ -39,7 +37,7 @@ func TestContext2Input_provider(t *testing.T) { }, }, }, - } + }) inp := &MockUIInput{ InputReturnMap: map[string]string{ @@ -89,9 +87,7 @@ func TestContext2Input_providerMulti(t *testing.T) { m := testModule(t, "input-provider-multi") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "foo": { @@ -111,7 +107,7 @@ func TestContext2Input_providerMulti(t *testing.T) { }, }, }, - } + }) inp := &MockUIInput{ InputReturnMap: map[string]string{ @@ -158,8 +154,6 @@ func TestContext2Input_providerMulti(t *testing.T) { func TestContext2Input_providerOnce(t *testing.T) { m := testModule(t, "input-provider-once") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -178,9 +172,7 @@ func TestContext2Input_providerId(t 
*testing.T) { m := testModule(t, "input-provider") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "foo": { @@ -200,7 +192,7 @@ func TestContext2Input_providerId(t *testing.T) { }, }, }, - } + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -241,11 +233,8 @@ func TestContext2Input_providerOnly(t *testing.T) { input := new(MockUIInput) m := testModule(t, "input-provider-vars") - p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "foo": { @@ -263,7 +252,7 @@ func TestContext2Input_providerOnly(t *testing.T) { }, }, }, - } + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -317,8 +306,6 @@ func TestContext2Input_providerVars(t *testing.T) { input := new(MockUIInput) m := testModule(t, "input-provider-with-vars") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -363,8 +350,6 @@ func TestContext2Input_providerVarsModuleInherit(t *testing.T) { input := new(MockUIInput) m := testModule(t, "input-provider-with-vars-and-module") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -383,8 +368,6 @@ func TestContext2Input_submoduleTriggersInvalidCount(t *testing.T) { input := new(MockUIInput) m := testModule(t, "input-submodule-count") p := testProvider("aws") - 
p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -405,7 +388,7 @@ func TestContext2Input_dataSourceRequiresRefresh(t *testing.T) { p := testProvider("null") m := testModule(t, "input-module-data-vars") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ DataSources: map[string]*configschema.Block{ "null_data_source": { Attributes: map[string]*configschema.Attribute{ @@ -413,7 +396,7 @@ func TestContext2Input_dataSourceRequiresRefresh(t *testing.T) { }, }, }, - } + }) p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { return providers.ReadDataSourceResponse{ State: req.Config, diff --git a/terraform/context_plan2_test.go b/terraform/context_plan2_test.go new file mode 100644 index 000000000..1e93e35fa --- /dev/null +++ b/terraform/context_plan2_test.go @@ -0,0 +1,251 @@ +package terraform + +import ( + "errors" + "testing" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/zclconf/go-cty/cty" +) + +func TestContext2Plan_removedDuringRefresh(t *testing.T) { + // The resource was added to state but actually failed to create and was + // left tainted. This should be removed during plan and result in a Create + // action. 
+ m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_object" "a" { +} +`, + }) + + p := simpleMockProvider() + p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + resp.NewState = cty.NullVal(req.PriorState.Type()) + return resp + } + + addr := mustResourceInstanceAddr("test_object.a") + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(addr, &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"test_string":"foo"}`), + Status: states.ObjectTainted, + }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) + }) + + ctx := testContext2(t, &ContextOpts{ + Config: m, + State: state, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + for _, c := range plan.Changes.Resources { + if c.Action != plans.Create { + t.Fatalf("expected Create action for missing %s, got %s", c.Addr, c.Action) + } + } +} + +func TestContext2Plan_noChangeDataSourceSensitiveNestedSet(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "bar" { + sensitive = true + default = "baz" +} + +data "test_data_source" "foo" { + foo { + bar = var.bar + } +} +`, + }) + + p := new(MockProvider) + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ + DataSources: map[string]*configschema.Block{ + "test_data_source": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "foo": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": {Type: cty.String, Optional: true}, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + }, + }) + + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: 
cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("data_id"), + "foo": cty.SetVal([]cty.Value{cty.ObjectVal(map[string]cty.Value{"bar": cty.StringVal("baz")})}), + }), + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("data.test_data_source.foo").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"data_id", "foo":[{"bar":"baz"}]}`), + AttrSensitivePaths: []cty.PathValueMarks{ + { + Path: cty.GetAttrPath("foo"), + Marks: cty.NewValueMarks("sensitive"), + }, + }, + }, + mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Config: m, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + State: state, + }) + + plan, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatal(diags.ErrWithWarnings()) + } + + for _, res := range plan.Changes.Resources { + if res.Action != plans.NoOp { + t.Fatalf("expected NoOp, got: %q %s", res.Addr, res.Action) + } + } +} + +func TestContext2Plan_orphanDataInstance(t *testing.T) { + // ensure the planned replacement of the data source is evaluated properly + m := testModuleInline(t, map[string]string{ + "main.tf": ` +data "test_object" "a" { + for_each = { new = "ok" } +} + +output "out" { + value = [ for k, _ in data.test_object.a: k ] +} +`, + }) + + p := simpleMockProvider() + p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + resp.State = req.Config + return resp + } + + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent(mustResourceInstanceAddr(`data.test_object.a["old"]`), &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"test_string":"foo"}`), + Status: states.ObjectReady, + }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`)) 
+ }) + + ctx := testContext2(t, &ContextOpts{ + Config: m, + State: state, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + plan, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + change, err := plan.Changes.Outputs[0].Decode() + if err != nil { + t.Fatal(err) + } + + expected := cty.TupleVal([]cty.Value{cty.StringVal("new")}) + + if change.After.Equals(expected).False() { + t.Fatalf("expected %#v, got %#v\n", expected, change.After) + } +} + +func TestContext2Plan_basicConfigurationAliases(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +provider "test" { + alias = "z" + test_string = "config" +} + +module "mod" { + source = "./mod" + providers = { + test.x = test.z + } +} +`, + + "mod/main.tf": ` +terraform { + required_providers { + test = { + source = "registry.terraform.io/hashicorp/test" + configuration_aliases = [ test.x ] + } + } +} + +resource "test_object" "a" { + provider = test.x +} + +`, + }) + + p := simpleMockProvider() + + // The resource within the module should be using the provider configured + // from the root module. We should never see an empty configuration. 
+ p.ConfigureFn = func(req providers.ConfigureRequest) (resp providers.ConfigureResponse) { + if req.Config.GetAttr("test_string").IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing test_string value")) + } + return resp + } + + ctx := testContext2(t, &ContextOpts{ + Config: m, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatal(diags.Err()) + } +} diff --git a/terraform/context_plan_test.go b/terraform/context_plan_test.go index 819870f96..5507cc08d 100644 --- a/terraform/context_plan_test.go +++ b/terraform/context_plan_test.go @@ -20,6 +20,7 @@ import ( "github.com/hashicorp/terraform/configs/hcl2shim" "github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/tfdiags" ) @@ -27,7 +28,6 @@ import ( func TestContext2Plan_basic(t *testing.T) { m := testModule(t, "plan-good") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -55,7 +55,7 @@ func TestContext2Plan_basic(t *testing.T) { t.Fatalf("expected empty state, got %#v\n", ctx.State()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() for _, r := range plan.Changes.Resources { ric, err := r.Decode(ty) @@ -78,6 +78,11 @@ func TestContext2Plan_basic(t *testing.T) { t.Fatal("unknown instance:", i) } } + + if !p.PrepareProviderConfigCalled { + t.Fatal("provider config was not checked before Configure") + } + } func TestContext2Plan_createBefore_deposed(t *testing.T) { @@ -130,7 +135,7 @@ func TestContext2Plan_createBefore_deposed(t *testing.T) { t.Fatalf("\nexpected: %q\ngot: %q\n", expectedState, 
ctx.State().String()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() type InstanceGen struct { @@ -198,7 +203,6 @@ func TestContext2Plan_createBefore_deposed(t *testing.T) { func TestContext2Plan_createBefore_maintainRoot(t *testing.T) { m := testModule(t, "plan-cbd-maintain-root") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -295,7 +299,7 @@ func TestContext2Plan_escapedVar(t *testing.T) { t.Fatalf("expected resource creation, got %s", res.Action) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() ric, err := res.Decode(ty) @@ -315,7 +319,6 @@ func TestContext2Plan_escapedVar(t *testing.T) { func TestContext2Plan_minimal(t *testing.T) { m := testModule(t, "plan-empty") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -371,7 +374,7 @@ func TestContext2Plan_modules(t *testing.T) { t.Error("expected 3 resource in plan, got", len(plan.Changes.Resources)) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() expectFoo := objectVal(t, schema, map[string]cty.Value{ @@ -414,7 +417,6 @@ func TestContext2Plan_moduleExpand(t *testing.T) { // Test a smattering of plan expansion behavior m := testModule(t, "plan-modules-expand") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -427,7 +429,7 @@ func TestContext2Plan_moduleExpand(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := 
p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() expected := map[string]struct{}{ @@ -464,7 +466,7 @@ func TestContext2Plan_moduleExpand(t *testing.T) { func TestContext2Plan_moduleCycle(t *testing.T) { m := testModule(t, "plan-module-cycle") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -474,8 +476,7 @@ func TestContext2Plan_moduleCycle(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -489,7 +490,7 @@ func TestContext2Plan_moduleCycle(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -544,7 +545,7 @@ func TestContext2Plan_moduleDeadlock(t *testing.T) { t.Fatalf("err: %s", err) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() for _, res := range plan.Changes.Resources { @@ -589,7 +590,7 @@ func TestContext2Plan_moduleInput(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -644,7 +645,7 @@ func TestContext2Plan_moduleInputComputed(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if 
len(plan.Changes.Resources) != 2 { @@ -702,7 +703,7 @@ func TestContext2Plan_moduleInputFromVar(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -740,7 +741,7 @@ func TestContext2Plan_moduleInputFromVar(t *testing.T) { func TestContext2Plan_moduleMultiVar(t *testing.T) { m := testModule(t, "plan-module-multi-var") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -750,8 +751,7 @@ func TestContext2Plan_moduleMultiVar(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -765,7 +765,7 @@ func TestContext2Plan_moduleMultiVar(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 5 { @@ -840,7 +840,7 @@ func TestContext2Plan_moduleOrphans(t *testing.T) { if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -888,8 +888,8 @@ module.child: func TestContext2Plan_moduleOrphansWithProvisioner(t *testing.T) { m := testModule(t, "plan-modules-remove-provisioners") p := testProvider("aws") - pr := testProvisioner() p.PlanResourceChangeFn = testDiffFn + pr := testProvisioner() state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -925,7 +925,7 @@ func 
TestContext2Plan_moduleOrphansWithProvisioner(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, State: state, @@ -936,7 +936,7 @@ func TestContext2Plan_moduleOrphansWithProvisioner(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 3 { @@ -1002,7 +1002,7 @@ func TestContext2Plan_moduleProviderInherit(t *testing.T) { defer l.Unlock() p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "from": {Type: cty.String, Optional: true}, @@ -1015,7 +1015,7 @@ func TestContext2Plan_moduleProviderInherit(t *testing.T) { }, }, }, - } + }) p.ConfigureFn = func(req providers.ConfigureRequest) (resp providers.ConfigureResponse) { from := req.Config.GetAttr("from") if from.IsNull() || from.AsString() != "root" { @@ -1066,7 +1066,7 @@ func TestContext2Plan_moduleProviderInheritDeep(t *testing.T) { var from string p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "from": {Type: cty.String, Optional: true}, @@ -1077,7 +1077,7 @@ func TestContext2Plan_moduleProviderInheritDeep(t *testing.T) { Attributes: map[string]*configschema.Attribute{}, }, }, - } + }) p.ConfigureFn = func(req providers.ConfigureRequest) (resp providers.ConfigureResponse) { v := req.Config.GetAttr("from") @@ -1121,7 +1121,7 @@ func 
TestContext2Plan_moduleProviderDefaultsVar(t *testing.T) { defer l.Unlock() p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "to": {Type: cty.String, Optional: true}, @@ -1135,7 +1135,7 @@ func TestContext2Plan_moduleProviderDefaultsVar(t *testing.T) { }, }, }, - } + }) p.ConfigureFn = func(req providers.ConfigureRequest) (resp providers.ConfigureResponse) { var buf bytes.Buffer from := req.Config.GetAttr("from") @@ -1153,7 +1153,6 @@ func TestContext2Plan_moduleProviderDefaultsVar(t *testing.T) { return } - p.PlanResourceChangeFn = testDiffFn return p, nil }, }, @@ -1183,7 +1182,7 @@ func TestContext2Plan_moduleProviderDefaultsVar(t *testing.T) { func TestContext2Plan_moduleProviderVar(t *testing.T) { m := testModule(t, "plan-module-provider-var") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "value": {Type: cty.String, Optional: true}, @@ -1196,8 +1195,7 @@ func TestContext2Plan_moduleProviderVar(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -1211,7 +1209,7 @@ func TestContext2Plan_moduleProviderVar(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 1 { @@ -1254,7 +1252,7 @@ func TestContext2Plan_moduleVar(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if 
len(plan.Changes.Resources) != 2 { @@ -1296,7 +1294,6 @@ func TestContext2Plan_moduleVar(t *testing.T) { func TestContext2Plan_moduleVarWrongTypeBasic(t *testing.T) { m := testModule(t, "plan-module-wrong-var-type") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -1313,7 +1310,6 @@ func TestContext2Plan_moduleVarWrongTypeBasic(t *testing.T) { func TestContext2Plan_moduleVarWrongTypeNested(t *testing.T) { m := testModule(t, "plan-module-wrong-var-type-nested") p := testProvider("null") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -1330,7 +1326,6 @@ func TestContext2Plan_moduleVarWrongTypeNested(t *testing.T) { func TestContext2Plan_moduleVarWithDefaultValue(t *testing.T) { m := testModule(t, "plan-module-var-with-default-value") p := testProvider("null") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -1359,7 +1354,7 @@ func TestContext2Plan_moduleVarComputed(t *testing.T) { if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -1466,7 +1461,6 @@ func TestContext2Plan_preventDestroy_good(t *testing.T) { func TestContext2Plan_preventDestroy_countBad(t *testing.T) { m := testModule(t, "plan-prevent-destroy-count-bad") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -1509,7 +1503,7 @@ func TestContext2Plan_preventDestroy_countBad(t *testing.T) { func TestContext2Plan_preventDestroy_countGood(t *testing.T) { m := testModule(t, "plan-prevent-destroy-count-good") p := 
testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -1518,8 +1512,7 @@ func TestContext2Plan_preventDestroy_countGood(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -1561,7 +1554,8 @@ func TestContext2Plan_preventDestroy_countGood(t *testing.T) { func TestContext2Plan_preventDestroy_countGoodNoChange(t *testing.T) { m := testModule(t, "plan-prevent-destroy-count-good") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.PlanResourceChangeFn = testDiffFn + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -1571,8 +1565,7 @@ func TestContext2Plan_preventDestroy_countGoodNoChange(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -1606,7 +1599,6 @@ func TestContext2Plan_preventDestroy_countGoodNoChange(t *testing.T) { func TestContext2Plan_preventDestroy_destroyPlan(t *testing.T) { m := testModule(t, "plan-prevent-destroy-good") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -1642,14 +1634,13 @@ func TestContext2Plan_preventDestroy_destroyPlan(t *testing.T) { func TestContext2Plan_provisionerCycle(t *testing.T) { m := testModule(t, "plan-provisioner-cycle") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn pr := testProvisioner() ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - 
Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "local-exec": testProvisionerFuncFixed(pr), }, }) @@ -1676,7 +1667,7 @@ func TestContext2Plan_computed(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -1716,7 +1707,7 @@ func TestContext2Plan_computed(t *testing.T) { func TestContext2Plan_blockNestingGroup(t *testing.T) { m := testModule(t, "plan-block-nesting-group") p := testProvider("test") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test": { BlockTypes: map[string]*configschema.NestedBlock{ @@ -1731,7 +1722,7 @@ func TestContext2Plan_blockNestingGroup(t *testing.T) { }, }, }, - } + }) p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { return providers.PlanResourceChangeResponse{ PlannedState: req.ProposedNewState, @@ -1788,7 +1779,7 @@ func TestContext2Plan_blockNestingGroup(t *testing.T) { func TestContext2Plan_computedDataResource(t *testing.T) { m := testModule(t, "plan-computed-data-resource") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -1805,8 +1796,7 @@ func TestContext2Plan_computedDataResource(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -1819,7 +1809,7 @@ func TestContext2Plan_computedDataResource(t *testing.T) { if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.DataSources["aws_vpc"] + 
schema := p.GetSchemaResponse.DataSources["aws_vpc"].Block ty := schema.ImpliedType() if rc := plan.Changes.ResourceInstance(addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "aws_instance", Name: "foo"}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)); rc == nil { @@ -1850,7 +1840,7 @@ func TestContext2Plan_computedDataResource(t *testing.T) { func TestContext2Plan_computedInFunction(t *testing.T) { m := testModule(t, "plan-computed-in-function") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -1865,9 +1855,8 @@ func TestContext2Plan_computedInFunction(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn - p.ReadDataSourceResponse = providers.ReadDataSourceResponse{ + }) + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ State: cty.ObjectVal(map[string]cty.Value{ "computed": cty.ListVal([]cty.Value{ cty.StringVal("foo"), @@ -1896,7 +1885,7 @@ func TestContext2Plan_computedInFunction(t *testing.T) { func TestContext2Plan_computedDataCountResource(t *testing.T) { m := testModule(t, "plan-computed-data-count") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -1913,8 +1902,7 @@ func TestContext2Plan_computedDataCountResource(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -1945,7 +1933,6 @@ func TestContext2Plan_computedDataCountResource(t *testing.T) { func TestContext2Plan_localValueCount(t *testing.T) { m := testModule(t, "plan-local-value-count") p := testProvider("test") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, 
&ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -1976,7 +1963,7 @@ func TestContext2Plan_dataResourceBecomesComputed(t *testing.T) { m := testModule(t, "plan-data-resource-becomes-computed") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -1993,7 +1980,7 @@ func TestContext2Plan_dataResourceBecomesComputed(t *testing.T) { }, }, }, - } + }) p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { fooVal := req.ProposedNewState.GetAttr("foo") @@ -2006,10 +1993,10 @@ func TestContext2Plan_dataResourceBecomesComputed(t *testing.T) { } } - schema := p.GetSchemaReturn.DataSources["aws_data_source"] + schema := p.GetSchemaResponse.DataSources["aws_data_source"].Block ty := schema.ImpliedType() - p.ReadDataSourceResponse = providers.ReadDataSourceResponse{ + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ // This should not be called, because the configuration for the // data resource contains an unknown value for "foo". 
Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("ReadDataSource called, but should not have been")), @@ -2069,7 +2056,8 @@ func TestContext2Plan_dataResourceBecomesComputed(t *testing.T) { func TestContext2Plan_computedList(t *testing.T) { m := testModule(t, "plan-computed-list") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.PlanResourceChangeFn = testDiffFn + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -2080,8 +2068,7 @@ func TestContext2Plan_computedList(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -2095,7 +2082,7 @@ func TestContext2Plan_computedList(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -2135,7 +2122,7 @@ func TestContext2Plan_computedMultiIndex(t *testing.T) { p := testProvider("aws") p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -2145,9 +2132,7 @@ func TestContext2Plan_computedMultiIndex(t *testing.T) { }, }, }, - } - - p.PlanResourceChangeFn = testDiffFn + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -2161,7 +2146,7 @@ func TestContext2Plan_computedMultiIndex(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 3 { @@ -2216,7 +2201,7 @@ func TestContext2Plan_count(t 
*testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 6 { @@ -2278,7 +2263,6 @@ func TestContext2Plan_count(t *testing.T) { func TestContext2Plan_countComputed(t *testing.T) { m := testModule(t, "plan-count-computed") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -2328,7 +2312,7 @@ func TestContext2Plan_countModuleStatic(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 3 { @@ -2382,7 +2366,7 @@ func TestContext2Plan_countModuleStaticGrandchild(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 3 { @@ -2436,7 +2420,7 @@ func TestContext2Plan_countIndex(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -2492,7 +2476,7 @@ func TestContext2Plan_countVar(t *testing.T) { if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 4 { @@ -2542,7 +2526,7 @@ func TestContext2Plan_countVar(t *testing.T) { func TestContext2Plan_countZero(t *testing.T) { m := 
testModule(t, "plan-count-zero") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -2550,7 +2534,7 @@ func TestContext2Plan_countZero(t *testing.T) { }, }, }, - } + }) // This schema contains a DynamicPseudoType, and therefore can't go through any shim functions p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { @@ -2570,7 +2554,7 @@ func TestContext2Plan_countZero(t *testing.T) { if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 1 { @@ -2612,7 +2596,7 @@ func TestContext2Plan_countOneIndex(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -2691,7 +2675,7 @@ func TestContext2Plan_countDecreaseToOne(t *testing.T) { if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 4 { @@ -2777,7 +2761,7 @@ func TestContext2Plan_countIncreaseFromNotSet(t *testing.T) { if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 4 { @@ -2856,7 +2840,7 @@ func TestContext2Plan_countIncreaseFromOne(t 
*testing.T) { if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 4 { @@ -2949,7 +2933,7 @@ func TestContext2Plan_countIncreaseFromOneCorrupted(t *testing.T) { if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 5 { @@ -3016,7 +3000,7 @@ func TestContext2Plan_countIncreaseFromOneCorrupted(t *testing.T) { func TestContext2Plan_countIncreaseWithSplatReference(t *testing.T) { m := testModule(t, "plan-count-splat-reference") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -3026,8 +3010,7 @@ func TestContext2Plan_countIncreaseWithSplatReference(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -3077,7 +3060,7 @@ func TestContext2Plan_countIncreaseWithSplatReference(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 6 { @@ -3120,7 +3103,6 @@ func TestContext2Plan_countIncreaseWithSplatReference(t *testing.T) { func TestContext2Plan_forEach(t *testing.T) { m := testModule(t, "plan-for-each") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ 
-3133,7 +3115,7 @@ func TestContext2Plan_forEach(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 8 { @@ -3156,7 +3138,6 @@ func TestContext2Plan_forEachUnknownValue(t *testing.T) { // expect this to produce an error, but not to panic. m := testModule(t, "plan-for-each-unknown-value") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -3187,7 +3168,6 @@ func TestContext2Plan_forEachUnknownValue(t *testing.T) { func TestContext2Plan_destroy(t *testing.T) { m := testModule(t, "plan-destroy") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -3222,7 +3202,7 @@ func TestContext2Plan_destroy(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -3250,7 +3230,6 @@ func TestContext2Plan_destroy(t *testing.T) { func TestContext2Plan_moduleDestroy(t *testing.T) { m := testModule(t, "plan-module-destroy") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -3285,7 +3264,7 @@ func TestContext2Plan_moduleDestroy(t *testing.T) { if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -3314,7 +3293,6 @@ func TestContext2Plan_moduleDestroy(t *testing.T) { func 
TestContext2Plan_moduleDestroyCycle(t *testing.T) { m := testModule(t, "plan-module-destroy-gh-1835") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn state := states.NewState() aModule := state.EnsureModule(addrs.RootModuleInstance.Child("a_module", addrs.NoKey)) @@ -3350,7 +3328,7 @@ func TestContext2Plan_moduleDestroyCycle(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -3378,7 +3356,6 @@ func TestContext2Plan_moduleDestroyCycle(t *testing.T) { func TestContext2Plan_moduleDestroyMultivar(t *testing.T) { m := testModule(t, "plan-module-destroy-multivar") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn state := states.NewState() child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) @@ -3413,7 +3390,7 @@ func TestContext2Plan_moduleDestroyMultivar(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -3446,7 +3423,7 @@ func TestContext2Plan_pathVar(t *testing.T) { m := testModule(t, "plan-path-var") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -3456,8 +3433,7 @@ func TestContext2Plan_pathVar(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -3471,7 +3447,7 @@ func TestContext2Plan_pathVar(t *testing.T) { t.Fatalf("err: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := 
p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 1 { @@ -3503,6 +3479,7 @@ func TestContext2Plan_pathVar(t *testing.T) { func TestContext2Plan_diffVar(t *testing.T) { m := testModule(t, "plan-diffvar") p := testProvider("aws") + p.PlanResourceChangeFn = testDiffFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) root.SetResourceInstanceCurrent( @@ -3522,14 +3499,12 @@ func TestContext2Plan_diffVar(t *testing.T) { State: state, }) - p.PlanResourceChangeFn = testDiffFn - plan, diags := ctx.Plan() if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -3576,7 +3551,6 @@ func TestContext2Plan_hook(t *testing.T) { m := testModule(t, "plan-good") h := new(MockHook) p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Hooks: []Hook{h}, @@ -3604,7 +3578,6 @@ func TestContext2Plan_closeProvider(t *testing.T) { // "provider.aws". 
m := testModule(t, "plan-close-module-provider") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -3650,7 +3623,7 @@ func TestContext2Plan_orphan(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -3688,7 +3661,6 @@ func TestContext2Plan_orphan(t *testing.T) { func TestContext2Plan_shadowUuid(t *testing.T) { m := testModule(t, "plan-shadow-uuid") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -3732,7 +3704,7 @@ func TestContext2Plan_state(t *testing.T) { if len(plan.Changes.Resources) < 2 { t.Fatalf("bad: %#v", plan.Changes.Resources) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -3811,7 +3783,7 @@ func TestContext2Plan_taint(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -3847,7 +3819,7 @@ func TestContext2Plan_taint(t *testing.T) { func TestContext2Plan_taintIgnoreChanges(t *testing.T) { m := testModule(t, "plan-taint-ignore-changes") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -3857,9 +3829,7 @@ func TestContext2Plan_taintIgnoreChanges(t *testing.T) { }, }, }, - 
} - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn + }) state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -3885,7 +3855,7 @@ func TestContext2Plan_taintIgnoreChanges(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 1 { @@ -3965,7 +3935,7 @@ func TestContext2Plan_taintDestroyInterpolatedCountRace(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 3 { @@ -4022,7 +3992,7 @@ func TestContext2Plan_targeted(t *testing.T) { if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 1 { @@ -4071,7 +4041,7 @@ func TestContext2Plan_targetedCrossModule(t *testing.T) { if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -4108,7 +4078,7 @@ func TestContext2Plan_targetedCrossModule(t *testing.T) { func TestContext2Plan_targetedModuleWithProvider(t *testing.T) { m := testModule(t, "plan-targeted-module-with-provider") p := testProvider("null") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "key": {Type: cty.String, Optional: true}, @@ -4119,8 +4089,7 @@ func 
TestContext2Plan_targetedModuleWithProvider(t *testing.T) { Attributes: map[string]*configschema.Attribute{}, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -4137,7 +4106,7 @@ func TestContext2Plan_targetedModuleWithProvider(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["null_resource"] + schema := p.GetSchemaResponse.ResourceTypes["null_resource"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 1 { @@ -4158,7 +4127,6 @@ func TestContext2Plan_targetedModuleWithProvider(t *testing.T) { func TestContext2Plan_targetedOrphan(t *testing.T) { m := testModule(t, "plan-targeted-orphan") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -4198,7 +4166,7 @@ func TestContext2Plan_targetedOrphan(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 1 { @@ -4226,7 +4194,6 @@ func TestContext2Plan_targetedOrphan(t *testing.T) { func TestContext2Plan_targetedModuleOrphan(t *testing.T) { m := testModule(t, "plan-targeted-module-orphan") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn state := states.NewState() child := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey)) @@ -4266,7 +4233,7 @@ func TestContext2Plan_targetedModuleOrphan(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 1 { @@ -4309,7 +4276,7 @@ func TestContext2Plan_targetedModuleUntargetedVariable(t *testing.T) { t.Fatalf("unexpected errors: %s", 
diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -4347,7 +4314,6 @@ func TestContext2Plan_targetedModuleUntargetedVariable(t *testing.T) { func TestContext2Plan_outputContainsTargetedResource(t *testing.T) { m := testModule(t, "plan-untargeted-resource-output") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -4383,7 +4349,6 @@ func TestContext2Plan_targetedOverTen(t *testing.T) { state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) - var expectedState []string for i := 0; i < 13; i++ { key := fmt.Sprintf("aws_instance.foo[%d]", i) id := fmt.Sprintf("i-abc%d", i) @@ -4397,7 +4362,6 @@ func TestContext2Plan_targetedOverTen(t *testing.T) { }, mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), ) - expectedState = append(expectedState, fmt.Sprintf("%s:\n ID = %s\n", key, id)) } ctx := testContext2(t, &ContextOpts{ @@ -4418,7 +4382,7 @@ func TestContext2Plan_targetedOverTen(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() for _, res := range plan.Changes.Resources { @@ -4435,7 +4399,6 @@ func TestContext2Plan_targetedOverTen(t *testing.T) { func TestContext2Plan_provider(t *testing.T) { m := testModule(t, "plan-provider") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn var value interface{} p.ConfigureFn = func(req providers.ConfigureRequest) (resp providers.ConfigureResponse) { @@ -4517,7 +4480,7 @@ func TestContext2Plan_ignoreChanges(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := 
p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 1 { @@ -4590,7 +4553,7 @@ func TestContext2Plan_ignoreChangesWildcard(t *testing.T) { func TestContext2Plan_ignoreChangesInMap(t *testing.T) { p := testProvider("test") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test_ignore_changes_map": { Attributes: map[string]*configschema.Attribute{ @@ -4598,15 +4561,13 @@ func TestContext2Plan_ignoreChangesInMap(t *testing.T) { }, }, }, - } + }) p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { return providers.PlanResourceChangeResponse{ PlannedState: req.ProposedNewState, } } - p.PlanResourceChangeFn = testDiffFn - s := states.BuildState(func(ss *states.SyncState) { ss.SetResourceInstanceCurrent( addrs.Resource{ @@ -4639,7 +4600,7 @@ func TestContext2Plan_ignoreChangesInMap(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["test_ignore_changes_map"] + schema := p.GetSchemaResponse.ResourceTypes["test_ignore_changes_map"].Block ty := schema.ImpliedType() if got, want := len(plan.Changes.Resources), 1; got != want { @@ -4702,7 +4663,7 @@ func TestContext2Plan_ignoreChangesSensitive(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 1 { @@ -4729,7 +4690,7 @@ func TestContext2Plan_ignoreChangesSensitive(t *testing.T) { func TestContext2Plan_moduleMapLiteral(t *testing.T) { m := testModule(t, "plan-module-map-literal") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: 
map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -4738,8 +4699,7 @@ func TestContext2Plan_moduleMapLiteral(t *testing.T) { }, }, }, - } - p.ApplyResourceChangeFn = testApplyFn + }) p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { s := req.ProposedNewState.AsValueMap() m := s["tags"].AsValueMap() @@ -4770,7 +4730,7 @@ func TestContext2Plan_moduleMapLiteral(t *testing.T) { func TestContext2Plan_computedValueInMap(t *testing.T) { m := testModule(t, "plan-computed-value-in-map") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -4783,8 +4743,7 @@ func TestContext2Plan_computedValueInMap(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { resp = testDiffFn(req) @@ -4815,7 +4774,7 @@ func TestContext2Plan_computedValueInMap(t *testing.T) { } for _, res := range plan.Changes.Resources { - schema := p.GetSchemaReturn.ResourceTypes[res.Addr.Resource.Resource.Type] + schema := p.GetSchemaResponse.ResourceTypes[res.Addr.Resource.Resource.Type].Block ric, err := res.Decode(schema.ImpliedType()) if err != nil { @@ -4844,8 +4803,7 @@ func TestContext2Plan_computedValueInMap(t *testing.T) { func TestContext2Plan_moduleVariableFromSplat(t *testing.T) { m := testModule(t, "plan-module-variable-from-splat") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -4853,7 +4811,7 @@ func 
TestContext2Plan_moduleVariableFromSplat(t *testing.T) { }, }, }, - } + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -4872,7 +4830,7 @@ func TestContext2Plan_moduleVariableFromSplat(t *testing.T) { } for _, res := range plan.Changes.Resources { - schema := p.GetSchemaReturn.ResourceTypes[res.Addr.Resource.Resource.Type] + schema := p.GetSchemaResponse.ResourceTypes[res.Addr.Resource.Resource.Type].Block ric, err := res.Decode(schema.ImpliedType()) if err != nil { @@ -4900,7 +4858,7 @@ func TestContext2Plan_moduleVariableFromSplat(t *testing.T) { func TestContext2Plan_createBeforeDestroy_depends_datasource(t *testing.T) { m := testModule(t, "plan-cbd-depends-datasource") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -4917,7 +4875,7 @@ func TestContext2Plan_createBeforeDestroy_depends_datasource(t *testing.T) { }, }, }, - } + }) p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { computedVal := req.ProposedNewState.GetAttr("computed") if computedVal.IsNull() { @@ -4955,9 +4913,9 @@ func TestContext2Plan_createBeforeDestroy_depends_datasource(t *testing.T) { var schema *configschema.Block switch res.Addr.Resource.Resource.Mode { case addrs.DataResourceMode: - schema = p.GetSchemaReturn.DataSources[res.Addr.Resource.Resource.Type] + schema = p.GetSchemaResponse.DataSources[res.Addr.Resource.Resource.Type].Block case addrs.ManagedResourceMode: - schema = p.GetSchemaReturn.ResourceTypes[res.Addr.Resource.Resource.Type] + schema = p.GetSchemaResponse.ResourceTypes[res.Addr.Resource.Resource.Type].Block } ric, err := res.Decode(schema.ImpliedType()) @@ -5004,9 +4962,7 @@ func TestContext2Plan_createBeforeDestroy_depends_datasource(t *testing.T) { func TestContext2Plan_listOrder(t *testing.T) { m := 
testModule(t, "plan-list-order") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -5014,7 +4970,7 @@ func TestContext2Plan_listOrder(t *testing.T) { }, }, }, - } + }) ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -5051,8 +5007,7 @@ func TestContext2Plan_listOrder(t *testing.T) { func TestContext2Plan_ignoreChangesWithFlatmaps(t *testing.T) { m := testModule(t, "plan-ignore-changes-with-flatmaps") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -5068,7 +5023,7 @@ func TestContext2Plan_ignoreChangesWithFlatmaps(t *testing.T) { }, }, }, - } + }) state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -5103,7 +5058,7 @@ func TestContext2Plan_ignoreChangesWithFlatmaps(t *testing.T) { } res := plan.Changes.Resources[0] - schema := p.GetSchemaReturn.ResourceTypes[res.Addr.Resource.Resource.Type] + schema := p.GetSchemaResponse.ResourceTypes[res.Addr.Resource.Resource.Type].Block ric, err := res.Decode(schema.ImpliedType()) if err != nil { @@ -5235,7 +5190,6 @@ func TestContext2Plan_resourceNestedCount(t *testing.T) { func TestContext2Plan_computedAttrRefTypeMismatch(t *testing.T) { m := testModule(t, "plan-computed-attr-ref-type-mismatch") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn p.ValidateResourceTypeConfigFn = func(req providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { var diags tfdiags.Diagnostics if 
req.TypeName == "aws_instance" { @@ -5248,7 +5202,6 @@ func TestContext2Plan_computedAttrRefTypeMismatch(t *testing.T) { Diagnostics: diags, } } - p.PlanResourceChangeFn = testDiffFn p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { if req.TypeName != "aws_ami_list" { t.Fatalf("Reached apply for unexpected resource type! %s", req.TypeName) @@ -5284,7 +5237,7 @@ func TestContext2Plan_computedAttrRefTypeMismatch(t *testing.T) { func TestContext2Plan_selfRef(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -5292,7 +5245,7 @@ func TestContext2Plan_selfRef(t *testing.T) { }, }, }, - } + }) m := testModule(t, "plan-self-ref") c := testContext2(t, &ContextOpts{ @@ -5321,7 +5274,7 @@ func TestContext2Plan_selfRef(t *testing.T) { func TestContext2Plan_selfRefMulti(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -5329,7 +5282,7 @@ func TestContext2Plan_selfRefMulti(t *testing.T) { }, }, }, - } + }) m := testModule(t, "plan-self-ref-multi") c := testContext2(t, &ContextOpts{ @@ -5358,7 +5311,7 @@ func TestContext2Plan_selfRefMulti(t *testing.T) { func TestContext2Plan_selfRefMultiAll(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -5366,7 +5319,7 @@ func TestContext2Plan_selfRefMultiAll(t *testing.T) { }, }, }, - } + }) m := testModule(t, 
"plan-self-ref-multi-all") c := testContext2(t, &ContextOpts{ @@ -5408,7 +5361,7 @@ output "out" { }) p := testProvider("aws") - p.ReadDataSourceResponse = providers.ReadDataSourceResponse{ + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ State: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("data_id"), "foo": cty.StringVal("foo"), @@ -5455,7 +5408,7 @@ resource "aws_instance" "foo" { }) p := testProvider("aws") - p.ReadDataSourceResponse = providers.ReadDataSourceResponse{ + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ State: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("data_id"), "foo": cty.StringVal("foo"), @@ -5544,7 +5497,7 @@ func TestContext2Plan_variableSensitivity(t *testing.T) { if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 1 { @@ -5599,13 +5552,19 @@ func TestContext2Plan_variableSensitivityModule(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, + Variables: InputValues{ + "another_var": &InputValue{ + Value: cty.StringVal("boop"), + SourceType: ValueFromCaller, + }, + }, }) plan, diags := ctx.Plan() if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 1 { @@ -5624,21 +5583,32 @@ func TestContext2Plan_variableSensitivityModule(t *testing.T) { switch i := ric.Addr.String(); i { case "module.child.aws_instance.foo": checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "foo": cty.StringVal("foo"), + "foo": cty.StringVal("foo"), + "value": cty.StringVal("boop"), }), ric.After) if 
len(res.ChangeSrc.BeforeValMarks) != 0 { t.Errorf("unexpected BeforeValMarks: %#v", res.ChangeSrc.BeforeValMarks) } - if len(res.ChangeSrc.AfterValMarks) != 1 { - t.Errorf("unexpected AfterValMarks: %#v", res.ChangeSrc.AfterValMarks) + if len(res.ChangeSrc.AfterValMarks) != 2 { + t.Errorf("expected AfterValMarks to contain two elements: %#v", res.ChangeSrc.AfterValMarks) continue } - pvm := res.ChangeSrc.AfterValMarks[0] - if got, want := pvm.Path, cty.GetAttrPath("foo"); !got.Equals(want) { - t.Errorf("unexpected path for mark\n got: %#v\nwant: %#v", got, want) + // validate that the after marks have "foo" and "value" + contains := func(pvmSlice []cty.PathValueMarks, stepName string) bool { + for _, pvm := range pvmSlice { + if pvm.Path.Equals(cty.GetAttrPath(stepName)) { + if pvm.Marks.Equal(cty.NewValueMarks("sensitive")) { + return true + } + } + } + return false } - if got, want := pvm.Marks, cty.NewValueMarks("sensitive"); !got.Equal(want) { - t.Errorf("unexpected value for mark\n got: %#v\nwant: %#v", got, want) + if !contains(res.ChangeSrc.AfterValMarks, "foo") { + t.Error("unexpected AfterValMarks to contain \"foo\" with sensitive mark") + } + if !contains(res.ChangeSrc.AfterValMarks, "value") { + t.Error("unexpected AfterValMarks to contain \"value\" with sensitive mark") } default: t.Fatal("unknown instance:", i) @@ -5667,7 +5637,7 @@ func objectVal(t *testing.T, schema *configschema.Block, m map[string]cty.Value) func TestContext2Plan_requiredModuleOutput(t *testing.T) { m := testModule(t, "plan-required-output") p := testProvider("test") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test_resource": { Attributes: map[string]*configschema.Attribute{ @@ -5676,8 +5646,7 @@ func TestContext2Plan_requiredModuleOutput(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ 
-5691,7 +5660,7 @@ func TestContext2Plan_requiredModuleOutput(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["test_resource"] + schema := p.GetSchemaResponse.ResourceTypes["test_resource"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -5732,7 +5701,7 @@ func TestContext2Plan_requiredModuleOutput(t *testing.T) { func TestContext2Plan_requiredModuleObject(t *testing.T) { m := testModule(t, "plan-required-whole-mod") p := testProvider("test") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test_resource": { Attributes: map[string]*configschema.Attribute{ @@ -5741,8 +5710,7 @@ func TestContext2Plan_requiredModuleObject(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -5756,7 +5724,7 @@ func TestContext2Plan_requiredModuleObject(t *testing.T) { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["test_resource"] + schema := p.GetSchemaResponse.ResourceTypes["test_resource"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 2 { @@ -5889,7 +5857,6 @@ output"out" { }) p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -5919,7 +5886,6 @@ resource "aws_instance" "foo" { }) p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn targets := []addrs.Targetable{} target, diags := addrs.ParseTargetStr("module.mod[1].aws_instance.foo[0]") @@ -5983,7 +5949,6 @@ resource "aws_instance" "foo" { }) p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn target, diags := addrs.ParseTargetStr("module.mod[1].aws_instance.foo") if diags.HasErrors() { @@ -6053,7 +6018,6 @@ resource "aws_instance" "foo" { }) p := testProvider("aws") - 
p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -6076,7 +6040,7 @@ data "test_data_source" "foo" {} }) p := new(MockProvider) - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ DataSources: map[string]*configschema.Block{ "test_data_source": { Attributes: map[string]*configschema.Attribute{ @@ -6091,9 +6055,9 @@ data "test_data_source" "foo" {} }, }, }, - } + }) - p.ReadDataSourceResponse = providers.ReadDataSourceResponse{ + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ State: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("data_id"), "foo": cty.StringVal("foo"), @@ -6134,8 +6098,6 @@ data "test_data_source" "foo" {} // for_each can reference a resource with 0 instances func TestContext2Plan_scaleInForEach(t *testing.T) { p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn m := testModuleInline(t, map[string]string{ "main.tf": ` @@ -6203,7 +6165,7 @@ func TestContext2Plan_targetedModuleInstance(t *testing.T) { if diags.HasErrors() { t.Fatalf("unexpected errors: %s", diags.Err()) } - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() if len(plan.Changes.Resources) != 1 { @@ -6240,7 +6202,7 @@ data "test_data_source" "d" { `}) p := testProvider("test") - p.ReadDataSourceResponse = providers.ReadDataSourceResponse{ + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ State: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("this"), "foo": cty.NullVal(cty.String), @@ -6271,8 +6233,6 @@ data "test_data_source" "d" { func TestContext2Plan_dataReferencesResource(t *testing.T) { p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp 
providers.ReadDataSourceResponse) { resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("data source should not be read")) @@ -6320,7 +6280,6 @@ data "test_data_source" "e" { func TestContext2Plan_skipRefresh(t *testing.T) { p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn p.PlanResourceChangeFn = testDiffFn m := testModuleInline(t, map[string]string{ @@ -6366,8 +6325,6 @@ resource "test_instance" "a" { func TestContext2Plan_dataInModuleDependsOn(t *testing.T) { p := testProvider("test") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn readDataSourceB := false p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { @@ -6421,3 +6378,362 @@ data "test_data_source" "b" { t.Fatal("data source b was not read during plan") } } + +func TestContext2Plan_rpcDiagnostics(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "a" { +} +`, + }) + + p := testProvider("test") + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + resp := testDiffFn(req) + resp.Diagnostics = resp.Diagnostics.Append(tfdiags.SimpleWarning("don't frobble")) + return resp + } + + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Config: m, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + _, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + if len(diags) == 0 { + t.Fatal("expected warnings") + } + + for _, d := range diags { + des := d.Description().Summary + if !strings.Contains(des, "frobble") { + t.Fatalf(`expected frobble, got %q`, des) + } + } +} + 
+// ignore_changes needs to be re-applied to the planned value for provider +// using the LegacyTypeSystem +func TestContext2Plan_legacyProviderIgnoreChanges(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "a" { + lifecycle { + ignore_changes = [data] + } +} +`, + }) + + p := testProvider("test") + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + m := req.ProposedNewState.AsValueMap() + // this provider "hashes" the data attribute as bar + m["data"] = cty.StringVal("bar") + + resp.PlannedState = cty.ObjectVal(m) + resp.LegacyTypeSystem = true + return resp + } + + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "data": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.a").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a","data":"foo"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Config: m, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + State: state, + }) + plan, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + for _, c := range plan.Changes.Resources { + if c.Action != plans.NoOp { + t.Fatalf("expected no changes, got %s for %q", c.Action, c.Addr) + } + } +} + +func TestContext2Plan_validateIgnoreAll(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "a" { + 
lifecycle { + ignore_changes = all + } +} +`, + }) + + p := testProvider("test") + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "data": {Type: cty.String, Optional: true}, + }, + }, + }, + }) + p.ValidateResourceTypeConfigFn = func(req providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { + var diags tfdiags.Diagnostics + if req.TypeName == "test_instance" { + if !req.Config.GetAttr("id").IsNull() { + diags = diags.Append(errors.New("id cannot be set in config")) + } + } + return providers.ValidateResourceTypeConfigResponse{ + Diagnostics: diags, + } + } + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.a").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a","data":"foo"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Config: m, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + State: state, + }) + _, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatal(diags.Err()) + } +} + +func TestContext2Plan_dataRemovalNoProvider(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +resource "test_instance" "a" { +} +`, + }) + + p := testProvider("test") + + state := states.NewState() + root := state.EnsureModule(addrs.RootModuleInstance) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("test_instance.a").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"a","data":"foo"}`), + Dependencies: 
[]addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), + ) + + // the provider for this data source is no longer in the config, but that + // should not matter for state removal. + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("data.test_data_source.d").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"d"}`), + Dependencies: []addrs.ConfigResource{}, + }, + mustProviderConfig(`provider["registry.terraform.io/local/test"]`), + ) + + ctx := testContext2(t, &ContextOpts{ + Config: m, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + // We still need to be able to locate the provider to decode the + // state, since we do not know during init that this provider is + // only used for an orphaned data source. + addrs.NewProvider("registry.terraform.io", "local", "test"): testProviderFuncFixed(p), + }, + State: state, + }) + _, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatal(diags.Err()) + } +} + +func TestContext2Plan_noSensitivityChange(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "sensitive_var" { + default = "hello" + sensitive = true +} + +resource "test_resource" "foo" { + value = var.sensitive_var + sensitive_value = var.sensitive_var +}`, + }) + + p := testProvider("test") + + ctx := testContext2(t, &ContextOpts{ + Config: m, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + State: states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_resource", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo", "value":"hello", "sensitive_value":"hello"}`), + 
AttrSensitivePaths: []cty.PathValueMarks{ + {Path: cty.Path{cty.GetAttrStep{Name: "value"}}, Marks: cty.NewValueMarks("sensitive")}, + {Path: cty.Path{cty.GetAttrStep{Name: "sensitive_value"}}, Marks: cty.NewValueMarks("sensitive")}, + }, + }, + addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("test"), + Module: addrs.RootModule, + }, + ) + }), + }) + plan, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + for _, c := range plan.Changes.Resources { + if c.Action != plans.NoOp { + t.Fatalf("expected no changes, got %s for %q", c.Action, c.Addr) + } + } +} + +func TestContext2Plan_variableCustomValidationsSensitive(t *testing.T) { + m := testModule(t, "validate-variable-custom-validations-child-sensitive") + + p := testProvider("test") + ctx := testContext2(t, &ContextOpts{ + Config: m, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + _, diags := ctx.Plan() + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), `Invalid value for variable: Value must not be "nope".`; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Plan_nullOutputNoOp(t *testing.T) { + // this should always plan a NoOp change for the output + m := testModuleInline(t, map[string]string{ + "main.tf": ` +output "planned" { + value = false ? 
1 : null +} +`, + }) + + ctx := testContext2(t, &ContextOpts{ + Config: m, + State: states.BuildState(func(s *states.SyncState) { + r := s.Module(addrs.RootModuleInstance) + r.SetOutputValue("planned", cty.NullVal(cty.DynamicPseudoType), false) + }), + }) + plan, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + for _, c := range plan.Changes.Outputs { + if c.Action != plans.NoOp { + t.Fatalf("expected no changes, got %s for %q", c.Action, c.Addr) + } + } +} + +func TestContext2Plan_createOutput(t *testing.T) { + // this should always plan a NoOp change for the output + m := testModuleInline(t, map[string]string{ + "main.tf": ` +output "planned" { + value = 1 +} +`, + }) + + ctx := testContext2(t, &ContextOpts{ + Config: m, + State: states.NewState(), + }) + plan, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + for _, c := range plan.Changes.Outputs { + if c.Action != plans.Create { + t.Fatalf("expected Create change, got %s for %q", c.Action, c.Addr) + } + } +} + +//////////////////////////////////////////////////////////////////////////////// +// NOTE: Due to the size of this file, new tests should be added to +// context_plan2_test.go. 
+//////////////////////////////////////////////////////////////////////////////// diff --git a/terraform/context_refresh_test.go b/terraform/context_refresh_test.go index b8efe480c..919e3452f 100644 --- a/terraform/context_refresh_test.go +++ b/terraform/context_refresh_test.go @@ -41,18 +41,16 @@ func TestContext2Refresh(t *testing.T) { State: state, }) - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() readState, err := hcl2shim.HCL2ValueFromFlatmap(map[string]string{"id": "foo", "foo": "baz"}, ty) if err != nil { t.Fatal(err) } - p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{ + p.ReadResourceResponse = &providers.ReadResourceResponse{ NewState: readState, } - p.PlanResourceChangeFn = testDiffFn s, diags := ctx.Refresh() if diags.HasErrors() { @@ -105,7 +103,7 @@ func TestContext2Refresh_dynamicAttr(t *testing.T) { }) p := testProvider("test") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test_instance": { Attributes: map[string]*configschema.Attribute{ @@ -113,7 +111,7 @@ func TestContext2Refresh_dynamicAttr(t *testing.T) { }, }, }, - } + }) p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { return providers.ReadResourceResponse{ NewState: readStateVal, @@ -132,7 +130,7 @@ func TestContext2Refresh_dynamicAttr(t *testing.T) { State: startingState, }) - schema := p.GetSchemaReturn.ResourceTypes["test_instance"] + schema := p.GetSchemaResponse.ResourceTypes["test_instance"].Block ty := schema.ImpliedType() s, diags := ctx.Refresh() @@ -169,7 +167,7 @@ func TestContext2Refresh_dataComputedModuleVar(t *testing.T) { return resp } - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{}, 
ResourceTypes: map[string]*configschema.Block{ "aws_instance": { @@ -199,7 +197,7 @@ func TestContext2Refresh_dataComputedModuleVar(t *testing.T) { }, }, }, - } + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -220,7 +218,7 @@ func TestContext2Refresh_dataComputedModuleVar(t *testing.T) { func TestContext2Refresh_targeted(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{}, ResourceTypes: map[string]*configschema.Block{ "aws_elb": { @@ -252,7 +250,7 @@ func TestContext2Refresh_targeted(t *testing.T) { }, }, }, - } + }) state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -282,7 +280,6 @@ func TestContext2Refresh_targeted(t *testing.T) { NewState: req.PriorState, } } - p.PlanResourceChangeFn = testDiffFn _, diags := ctx.Refresh() if diags.HasErrors() { @@ -297,7 +294,7 @@ func TestContext2Refresh_targeted(t *testing.T) { func TestContext2Refresh_targetedCount(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{}, ResourceTypes: map[string]*configschema.Block{ "aws_elb": { @@ -329,7 +326,7 @@ func TestContext2Refresh_targetedCount(t *testing.T) { }, }, }, - } + }) state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -361,7 +358,6 @@ func TestContext2Refresh_targetedCount(t *testing.T) { NewState: req.PriorState, } } - p.PlanResourceChangeFn = testDiffFn _, diags := ctx.Refresh() if diags.HasErrors() { @@ -384,7 +380,7 @@ func TestContext2Refresh_targetedCount(t *testing.T) { func TestContext2Refresh_targetedCountIndex(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{}, ResourceTypes: 
map[string]*configschema.Block{ "aws_elb": { @@ -416,7 +412,7 @@ func TestContext2Refresh_targetedCountIndex(t *testing.T) { }, }, }, - } + }) state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -448,7 +444,6 @@ func TestContext2Refresh_targetedCountIndex(t *testing.T) { NewState: req.PriorState, } } - p.PlanResourceChangeFn = testDiffFn _, diags := ctx.Refresh() if diags.HasErrors() { @@ -463,7 +458,7 @@ func TestContext2Refresh_targetedCountIndex(t *testing.T) { func TestContext2Refresh_moduleComputedVar(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{}, ResourceTypes: map[string]*configschema.Block{ "aws_instance": { @@ -479,8 +474,7 @@ func TestContext2Refresh_moduleComputedVar(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) m := testModule(t, "refresh-module-computed-var") ctx := testContext2(t, &ContextOpts{ @@ -513,11 +507,9 @@ func TestContext2Refresh_delete(t *testing.T) { State: state, }) - p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{ - NewState: cty.NullVal(p.GetSchemaReturn.ResourceTypes["aws_instance"].ImpliedType()), + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.NullVal(p.GetSchemaResponse.ResourceTypes["aws_instance"].Block.ImpliedType()), } - p.PlanResourceChangeFn = testDiffFn s, diags := ctx.Refresh() if diags.HasErrors() { @@ -541,13 +533,11 @@ func TestContext2Refresh_ignoreUncreated(t *testing.T) { State: nil, }) - p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{ + p.ReadResourceResponse = &providers.ReadResourceResponse{ NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("foo"), }), } - p.PlanResourceChangeFn = testDiffFn _, diags := ctx.Refresh() if diags.HasErrors() { @@ -561,7 +551,6 @@ func TestContext2Refresh_ignoreUncreated(t 
*testing.T) { func TestContext2Refresh_hook(t *testing.T) { h := new(MockHook) p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn m := testModule(t, "refresh-basic") state := states.NewState() @@ -623,7 +612,6 @@ func TestContext2Refresh_modules(t *testing.T) { NewState: new, } } - p.PlanResourceChangeFn = testDiffFn s, diags := ctx.Refresh() if diags.HasErrors() { @@ -640,8 +628,7 @@ func TestContext2Refresh_modules(t *testing.T) { func TestContext2Refresh_moduleInputComputedOutput(t *testing.T) { m := testModule(t, "refresh-module-input-computed-output") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{}, ResourceTypes: map[string]*configschema.Block{ "aws_instance": { @@ -658,7 +645,7 @@ func TestContext2Refresh_moduleInputComputedOutput(t *testing.T) { }, }, }, - } + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -675,7 +662,6 @@ func TestContext2Refresh_moduleInputComputedOutput(t *testing.T) { func TestContext2Refresh_moduleVarModule(t *testing.T) { m := testModule(t, "refresh-module-var-module") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -699,13 +685,11 @@ func TestContext2Refresh_noState(t *testing.T) { }, }) - p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{ + p.ReadResourceResponse = &providers.ReadResourceResponse{ NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("foo"), }), } - p.PlanResourceChangeFn = testDiffFn if _, diags := ctx.Refresh(); diags.HasErrors() { t.Fatalf("refresh errs: %s", diags.Err()) @@ -714,7 +698,8 @@ func TestContext2Refresh_noState(t *testing.T) { func TestContext2Refresh_output(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.PlanResourceChangeFn = 
testDiffFn + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{}, ResourceTypes: map[string]*configschema.Block{ "aws_instance": { @@ -731,8 +716,7 @@ func TestContext2Refresh_output(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) m := testModule(t, "refresh-output") @@ -768,9 +752,8 @@ func TestContext2Refresh_outputPartial(t *testing.T) { // Refresh creates a partial plan for any instances that don't have // remote objects yet, to get stub values for interpolation. Therefore // we need to make DiffFn available to let that complete. - p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{}, ResourceTypes: map[string]*configschema.Block{ "aws_instance": { @@ -782,11 +765,10 @@ func TestContext2Refresh_outputPartial(t *testing.T) { }, }, }, - } + }) - p.ReadResourceFn = nil - p.ReadResourceResponse = providers.ReadResourceResponse{ - NewState: cty.NullVal(p.GetSchemaReturn.ResourceTypes["aws_instance"].ImpliedType()), + p.ReadResourceResponse = &providers.ReadResourceResponse{ + NewState: cty.NullVal(p.GetSchemaResponse.ResourceTypes["aws_instance"].Block.ImpliedType()), } state := states.NewState() @@ -829,7 +811,7 @@ func TestContext2Refresh_stateBasic(t *testing.T) { State: state, }) - schema := p.GetSchemaReturn.ResourceTypes["aws_instance"] + schema := p.GetSchemaResponse.ResourceTypes["aws_instance"].Block ty := schema.ImpliedType() readStateVal, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ @@ -839,9 +821,7 @@ func TestContext2Refresh_stateBasic(t *testing.T) { t.Fatal(err) } - p.ReadResourceFn = nil - p.PlanResourceChangeFn = testDiffFn - p.ReadResourceResponse = providers.ReadResourceResponse{ + p.ReadResourceResponse = &providers.ReadResourceResponse{ NewState: readStateVal, } @@ -875,7 +855,7 @@ func TestContext2Refresh_dataCount(t 
*testing.T) { resp.PlannedState = cty.ObjectVal(m) return resp } - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test": { Attributes: map[string]*configschema.Attribute{ @@ -887,7 +867,7 @@ func TestContext2Refresh_dataCount(t *testing.T) { DataSources: map[string]*configschema.Block{ "test": {}, }, - } + }) p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { return providers.ReadDataSourceResponse{ @@ -924,12 +904,12 @@ func TestContext2Refresh_dataState(t *testing.T) { } p := testProvider("null") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{}, DataSources: map[string]*configschema.Block{ "null_data_source": schema, }, - } + }) ctx := testContext2(t, &ContextOpts{ Config: m, @@ -949,7 +929,6 @@ func TestContext2Refresh_dataState(t *testing.T) { State: readStateVal, } } - p.PlanResourceChangeFn = testDiffFn s, diags := ctx.Refresh() if diags.HasErrors() { @@ -974,7 +953,7 @@ func TestContext2Refresh_dataState(t *testing.T) { func TestContext2Refresh_dataStateRefData(t *testing.T) { p := testProvider("null") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{}, DataSources: map[string]*configschema.Block{ "null_data_source": { @@ -994,7 +973,7 @@ func TestContext2Refresh_dataStateRefData(t *testing.T) { }, }, }, - } + }) m := testModule(t, "refresh-data-ref-data") state := states.NewState() @@ -1015,7 +994,6 @@ func TestContext2Refresh_dataStateRefData(t *testing.T) { State: cty.ObjectVal(m), } } - p.PlanResourceChangeFn = testDiffFn s, diags := ctx.Refresh() if diags.HasErrors() { @@ -1053,7 +1031,6 @@ func TestContext2Refresh_tainted(t *testing.T) { NewState: cty.ObjectVal(m), } } - p.PlanResourceChangeFn = 
testDiffFn s, diags := ctx.Refresh() if diags.HasErrors() { @@ -1076,9 +1053,6 @@ func TestContext2Refresh_tainted(t *testing.T) { // Providers was _empty_. func TestContext2Refresh_unknownProvider(t *testing.T) { m := testModule(t, "refresh-unknown-provider") - p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -1094,7 +1068,7 @@ func TestContext2Refresh_unknownProvider(t *testing.T) { t.Fatal("successfully created context; want error") } - if !regexp.MustCompile(`Failed to instantiate provider ".+"`).MatchString(diags.Err().Error()) { + if !regexp.MustCompile(`failed to instantiate provider ".+"`).MatchString(diags.Err().Error()) { t.Fatalf("wrong error: %s", diags.Err()) } } @@ -1115,10 +1089,10 @@ func TestContext2Refresh_vars(t *testing.T) { }, } - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{}, ResourceTypes: map[string]*configschema.Block{"aws_instance": schema}, - } + }) m := testModule(t, "refresh-vars") state := states.NewState() @@ -1140,9 +1114,7 @@ func TestContext2Refresh_vars(t *testing.T) { t.Fatal(err) } - p.ReadResourceFn = nil - p.PlanResourceChangeFn = testDiffFn - p.ReadResourceResponse = providers.ReadResourceResponse{ + p.ReadResourceResponse = &providers.ReadResourceResponse{ NewState: readStateVal, } @@ -1195,7 +1167,6 @@ func TestContext2Refresh_orphanModule(t *testing.T) { NewState: req.PriorState, } } - p.PlanResourceChangeFn = testDiffFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -1248,7 +1219,7 @@ func TestContext2Refresh_orphanModule(t *testing.T) { func TestContext2Validate(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{}, 
ResourceTypes: map[string]*configschema.Block{ "aws_instance": { @@ -1264,8 +1235,7 @@ func TestContext2Validate(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn + }) m := testModule(t, "validate-good") c := testContext2(t, &ContextOpts{ @@ -1284,8 +1254,6 @@ func TestContext2Validate(t *testing.T) { func TestContext2Refresh_updateProviderInState(t *testing.T) { m := testModule(t, "update-resource-provider") p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn - p.ApplyResourceChangeFn = testApplyFn state := states.NewState() root := state.EnsureModule(addrs.RootModuleInstance) @@ -1318,7 +1286,7 @@ aws_instance.bar: func TestContext2Refresh_schemaUpgradeFlatmap(t *testing.T) { m := testModule(t, "refresh-schema-upgrade") p := testProvider("test") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test_thing": { Attributes: map[string]*configschema.Attribute{ @@ -1332,13 +1300,12 @@ func TestContext2Refresh_schemaUpgradeFlatmap(t *testing.T) { ResourceTypeSchemaVersions: map[string]uint64{ "test_thing": 5, }, - } - p.UpgradeResourceStateResponse = providers.UpgradeResourceStateResponse{ + }) + p.UpgradeResourceStateResponse = &providers.UpgradeResourceStateResponse{ UpgradedState: cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("foo"), }), } - p.PlanResourceChangeFn = testDiffFn s := states.BuildState(func(s *states.SyncState) { s.SetResourceInstanceCurrent( @@ -1405,7 +1372,7 @@ test_thing.bar: func TestContext2Refresh_schemaUpgradeJSON(t *testing.T) { m := testModule(t, "refresh-schema-upgrade") p := testProvider("test") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test_thing": { Attributes: map[string]*configschema.Attribute{ @@ -1419,13 +1386,12 @@ func TestContext2Refresh_schemaUpgradeJSON(t 
*testing.T) { ResourceTypeSchemaVersions: map[string]uint64{ "test_thing": 5, }, - } - p.UpgradeResourceStateResponse = providers.UpgradeResourceStateResponse{ + }) + p.UpgradeResourceStateResponse = &providers.UpgradeResourceStateResponse{ UpgradedState: cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("foo"), }), } - p.PlanResourceChangeFn = testDiffFn s := states.BuildState(func(s *states.SyncState) { s.SetResourceInstanceCurrent( @@ -1503,7 +1469,6 @@ data "aws_data_source" "foo" { resp.State = req.Config return } - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -1527,7 +1492,7 @@ data "aws_data_source" "foo" { func TestContext2Refresh_dataResourceDependsOn(t *testing.T) { m := testModule(t, "plan-data-depends-on") p := testProvider("test") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test_resource": { Attributes: map[string]*configschema.Attribute{ @@ -1543,9 +1508,8 @@ func TestContext2Refresh_dataResourceDependsOn(t *testing.T) { }, }, }, - } - p.PlanResourceChangeFn = testDiffFn - p.ReadDataSourceResponse = providers.ReadDataSourceResponse{ + }) + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ State: cty.ObjectVal(map[string]cty.Value{ "compute": cty.StringVal("value"), }), @@ -1653,8 +1617,6 @@ resource "aws_instance" "foo" { }) p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -1722,8 +1684,6 @@ resource "aws_instance" "bar" { }) p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, diff --git a/terraform/context_test.go b/terraform/context_test.go index dde3ec5e9..c16dfe153 100644 --- a/terraform/context_test.go +++ b/terraform/context_test.go @@ -15,10 +15,12 @@ import ( 
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/configs/configload" "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/internal/depsfile" "github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/plans/planfile" "github.com/hashicorp/terraform/providers" @@ -117,6 +119,181 @@ func TestNewContextRequiredVersion(t *testing.T) { } } +func TestNewContext_lockedDependencies(t *testing.T) { + configBeepGreaterThanOne := ` +terraform { + required_providers { + beep = { + source = "example.com/foo/beep" + version = ">= 1.0.0" + } + } +} +` + configBeepLessThanOne := ` +terraform { + required_providers { + beep = { + source = "example.com/foo/beep" + version = "< 1.0.0" + } + } +} +` + configBuiltin := ` +terraform { + required_providers { + terraform = { + source = "terraform.io/builtin/terraform" + } + } +} +` + locksBeepGreaterThanOne := ` +provider "example.com/foo/beep" { + version = "1.0.0" + constraints = ">= 1.0.0" + hashes = [ + "h1:does-not-match", + ] +} +` + configBeepBoop := ` +terraform { + required_providers { + beep = { + source = "example.com/foo/beep" + version = "< 1.0.0" # different from locks + } + boop = { + source = "example.com/foo/boop" + version = ">= 2.0.0" + } + } +} +` + locksBeepBoop := ` +provider "example.com/foo/beep" { + version = "1.0.0" + constraints = ">= 1.0.0" + hashes = [ + "h1:does-not-match", + ] +} +provider "example.com/foo/boop" { + version = "2.3.4" + constraints = ">= 2.0.0" + hashes = [ + "h1:does-not-match", + ] +} +` + beepAddr := addrs.MustParseProviderSourceString("example.com/foo/beep") + boopAddr := addrs.MustParseProviderSourceString("example.com/foo/boop") + + testCases := map[string]struct { + Config string + LockFile string + DevProviders 
[]addrs.Provider + WantErr string + }{ + "dependencies met": { + Config: configBeepGreaterThanOne, + LockFile: locksBeepGreaterThanOne, + }, + "no locks given": { + Config: configBeepGreaterThanOne, + }, + "builtin provider with empty locks": { + Config: configBuiltin, + LockFile: `# This file is maintained automatically by "terraform init".`, + }, + "multiple providers, one in development": { + Config: configBeepBoop, + LockFile: locksBeepBoop, + DevProviders: []addrs.Provider{beepAddr}, + }, + "development provider with empty locks": { + Config: configBeepGreaterThanOne, + LockFile: `# This file is maintained automatically by "terraform init".`, + DevProviders: []addrs.Provider{beepAddr}, + }, + "multiple providers, one in development, one missing": { + Config: configBeepBoop, + LockFile: locksBeepGreaterThanOne, + DevProviders: []addrs.Provider{beepAddr}, + WantErr: `Provider requirements cannot be satisfied by locked dependencies: The following required providers are not installed: + +- example.com/foo/boop (>= 2.0.0) + +Please run "terraform init".`, + }, + "wrong provider version": { + Config: configBeepLessThanOne, + LockFile: locksBeepGreaterThanOne, + WantErr: `Provider requirements cannot be satisfied by locked dependencies: The following required providers are not installed: + +- example.com/foo/beep (< 1.0.0) + +Please run "terraform init".`, + }, + "empty locks": { + Config: configBeepGreaterThanOne, + LockFile: `# This file is maintained automatically by "terraform init".`, + WantErr: `Provider requirements cannot be satisfied by locked dependencies: The following required providers are not installed: + +- example.com/foo/beep (>= 1.0.0) + +Please run "terraform init".`, + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + var locks *depsfile.Locks + if tc.LockFile != "" { + var diags tfdiags.Diagnostics + locks, diags = depsfile.LoadLocksFromBytes([]byte(tc.LockFile), "test.lock.hcl") + if len(diags) > 0 { + 
t.Fatalf("unexpected error loading locks file: %s", diags.Err()) + } + } + devProviders := make(map[addrs.Provider]struct{}) + for _, provider := range tc.DevProviders { + devProviders[provider] = struct{}{} + } + opts := &ContextOpts{ + Config: testModuleInline(t, map[string]string{ + "main.tf": tc.Config, + }), + LockedDependencies: locks, + ProvidersInDevelopment: devProviders, + Providers: map[addrs.Provider]providers.Factory{ + beepAddr: testProviderFuncFixed(testProvider("beep")), + boopAddr: testProviderFuncFixed(testProvider("boop")), + addrs.NewBuiltInProvider("terraform"): testProviderFuncFixed(testProvider("terraform")), + }, + } + + ctx, diags := NewContext(opts) + if tc.WantErr != "" { + if len(diags) == 0 { + t.Fatal("expected diags but none returned") + } + if got, want := diags.Err().Error(), tc.WantErr; got != want { + t.Errorf("wrong diags\n got: %s\nwant: %s", got, want) + } + } else { + if len(diags) > 0 { + t.Errorf("unexpected diags: %s", diags.Err()) + } + if ctx == nil { + t.Error("ctx is nil") + } + } + }) + } +} + func testContext2(t *testing.T, opts *ContextOpts) *Context { t.Helper() @@ -224,11 +401,7 @@ func testDiffFn(req providers.PlanResourceChangeRequest) (resp providers.PlanRes func testProvider(prefix string) *MockProvider { p := new(MockProvider) - p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse { - return providers.ReadResourceResponse{NewState: req.PriorState} - } - - p.GetSchemaReturn = testProviderSchema(prefix) + p.GetSchemaResponse = testProviderSchema(prefix) return p } @@ -266,20 +439,6 @@ func checkStateString(t *testing.T, state *states.State, expected string) { } } -func resourceState(resourceType, resourceID string) *ResourceState { - providerResource := strings.Split(resourceType, "_") - return &ResourceState{ - Type: resourceType, - Primary: &InstanceState{ - ID: resourceID, - Attributes: map[string]string{ - "id": resourceID, - }, - }, - Provider: "provider." 
+ providerResource[0], - } -} - // Test helper that gives a function 3 seconds to finish, assumes deadlock and // fails test if it does not. func testCheckDeadlock(t *testing.T, f func()) { @@ -302,8 +461,8 @@ func testCheckDeadlock(t *testing.T, f func()) { } } -func testProviderSchema(name string) *ProviderSchema { - return &ProviderSchema{ +func testProviderSchema(name string) *providers.GetSchemaResponse { + return getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "region": { @@ -436,15 +595,6 @@ func testProviderSchema(name string) *ProviderSchema { }, }, BlockTypes: map[string]*configschema.NestedBlock{ - "network_interface": { - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "network_interface_id": {Type: cty.String, Optional: true}, - "device_index": {Type: cty.Number, Optional: true}, - }, - }, - Nesting: configschema.NestingSet, - }, "nesting_single": { Block: configschema.Block{ Attributes: map[string]*configschema.Attribute{ @@ -554,8 +704,7 @@ func testProviderSchema(name string) *ProviderSchema { }, }, }, - } - + }) } // contextForPlanViaFile is a helper that creates a temporary plan file, then @@ -891,18 +1040,6 @@ func logDiagnostics(t *testing.T, diags tfdiags.Diagnostics) { } } -const testContextGraph = ` -root: root -aws_instance.bar - aws_instance.bar -> provider.aws -aws_instance.foo - aws_instance.foo -> provider.aws -provider.aws -root - root -> aws_instance.bar - root -> aws_instance.foo -` - const testContextRefreshModuleStr = ` aws_instance.web: (tainted) ID = bar diff --git a/terraform/context_validate_test.go b/terraform/context_validate_test.go index ec1f9b091..bb509a90c 100644 --- a/terraform/context_validate_test.go +++ b/terraform/context_validate_test.go @@ -18,13 +18,13 @@ import ( func TestContext2Validate_badCount(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = 
getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{}, }, }, - } + }) m := testModule(t, "validate-bad-count") c := testContext2(t, &ContextOpts{ @@ -42,13 +42,13 @@ func TestContext2Validate_badCount(t *testing.T) { func TestContext2Validate_badResource_reference(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{}, }, }, - } + }) m := testModule(t, "validate-bad-resource-count") c := testContext2(t, &ContextOpts{ @@ -66,7 +66,7 @@ func TestContext2Validate_badResource_reference(t *testing.T) { func TestContext2Validate_badVar(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { Attributes: map[string]*configschema.Attribute{ @@ -75,7 +75,7 @@ func TestContext2Validate_badVar(t *testing.T) { }, }, }, - } + }) m := testModule(t, "validate-bad-var") c := testContext2(t, &ContextOpts{ @@ -94,7 +94,7 @@ func TestContext2Validate_badVar(t *testing.T) { func TestContext2Validate_varMapOverrideOld(t *testing.T) { m := testModule(t, "validate-module-pc-vars") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(&ProviderSchema{ Provider: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "foo": {Type: cty.String, Optional: true}, @@ -105,7 +105,7 @@ func TestContext2Validate_varMapOverrideOld(t *testing.T) { Attributes: map[string]*configschema.Attribute{}, }, }, - } + }) _, diags := NewContext(&ContextOpts{ Config: m, @@ -133,25 +133,31 @@ func TestContext2Validate_varNoDefaultExplicitType(t 
*testing.T) { func TestContext2Validate_computedVar(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "value": {Type: cty.String, Optional: true}, + p.GetSchemaResponse = &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "value": {Type: cty.String, Optional: true}, + }, }, }, - ResourceTypes: map[string]*configschema.Block{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + }, }, }, } pt := testProvider("test") - pt.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + pt.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "value": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "value": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -186,19 +192,23 @@ func TestContext2Validate_computedVar(t *testing.T) { func TestContext2Validate_computedInFunction(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "attr": {Type: cty.Number, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "attr": {Type: cty.Number, Optional: true}, + }, }, }, }, - DataSources: map[string]*configschema.Block{ + DataSources: 
map[string]providers.Schema{ "aws_data_source": { - Attributes: map[string]*configschema.Attribute{ - "optional_attr": {Type: cty.String, Optional: true}, - "computed": {Type: cty.String, Computed: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "optional_attr": {Type: cty.String, Optional: true}, + "computed": {Type: cty.String, Computed: true}, + }, }, }, }, @@ -223,17 +233,21 @@ func TestContext2Validate_computedInFunction(t *testing.T) { // can be realized during a plan. func TestContext2Validate_countComputed(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + }, }, }, - DataSources: map[string]*configschema.Block{ + DataSources: map[string]providers.Schema{ "aws_data_source": { - Attributes: map[string]*configschema.Attribute{ - "compute": {Type: cty.String, Optional: true}, - "value": {Type: cty.String, Computed: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "compute": {Type: cty.String, Optional: true}, + "value": {Type: cty.String, Computed: true}, + }, }, }, }, @@ -255,14 +269,15 @@ func TestContext2Validate_countComputed(t *testing.T) { func TestContext2Validate_countNegative(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + }, }, }, } - m := testModule(t, "validate-count-negative") c := testContext2(t, &ContextOpts{ 
Config: m, @@ -279,16 +294,17 @@ func TestContext2Validate_countNegative(t *testing.T) { func TestContext2Validate_countVariable(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, }, } - m := testModule(t, "apply-count-variable") c := testContext2(t, &ContextOpts{ Config: m, @@ -306,16 +322,17 @@ func TestContext2Validate_countVariable(t *testing.T) { func TestContext2Validate_countVariableNoDefault(t *testing.T) { p := testProvider("aws") m := testModule(t, "validate-count-variable") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, }, } - _, diags := NewContext(&ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -330,16 +347,17 @@ func TestContext2Validate_countVariableNoDefault(t *testing.T) { func TestContext2Validate_moduleBadOutput(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, }, } - m := testModule(t, "validate-bad-module-output") c := testContext2(t, &ContextOpts{ Config: m, @@ -356,16 +374,17 @@ func TestContext2Validate_moduleBadOutput(t *testing.T) { func TestContext2Validate_moduleGood(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, }, } - m := testModule(t, "validate-good-module") c := testContext2(t, &ContextOpts{ Config: m, @@ -383,10 +402,12 @@ func TestContext2Validate_moduleGood(t *testing.T) { func TestContext2Validate_moduleBadResource(t *testing.T) { m := testModule(t, "validate-module-bad-rc") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + }, }, }, } @@ -398,7 +419,7 @@ func TestContext2Validate_moduleBadResource(t *testing.T) { }, }) - p.ValidateResourceTypeConfigResponse = providers.ValidateResourceTypeConfigResponse{ + p.ValidateResourceTypeConfigResponse = &providers.ValidateResourceTypeConfigResponse{ Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("bad")), } @@ -411,11 +432,13 @@ func TestContext2Validate_moduleBadResource(t *testing.T) { func TestContext2Validate_moduleDepsShouldNotCycle(t *testing.T) { m := testModule(t, "validate-module-deps-cycle") p := testProvider("aws") - 
p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -437,19 +460,23 @@ func TestContext2Validate_moduleDepsShouldNotCycle(t *testing.T) { func TestContext2Validate_moduleProviderVar(t *testing.T) { m := testModule(t, "validate-module-pc-vars") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { + p.GetSchemaResponse = &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "foo": {Type: cty.String, Optional: true}, }, }, }, + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, } c := testContext2(t, &ContextOpts{ @@ -481,19 +508,23 @@ func TestContext2Validate_moduleProviderVar(t *testing.T) { func TestContext2Validate_moduleProviderInheritUnused(t *testing.T) { m := testModule(t, "validate-module-pc-inherit-unused") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { + p.GetSchemaResponse = &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ Attributes: 
map[string]*configschema.Attribute{ "foo": {Type: cty.String, Optional: true}, }, }, }, + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, } c := testContext2(t, &ContextOpts{ @@ -518,12 +549,14 @@ func TestContext2Validate_moduleProviderInheritUnused(t *testing.T) { func TestContext2Validate_orphans(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - "num": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + "num": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -546,7 +579,7 @@ func TestContext2Validate_orphans(t *testing.T) { p.ValidateResourceTypeConfigFn = func(req providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { var diags tfdiags.Diagnostics if req.Config.GetAttr("foo").IsNull() { - diags.Append(errors.New("foo is not set")) + diags = diags.Append(errors.New("foo is not set")) } return providers.ValidateResourceTypeConfigResponse{ Diagnostics: diags, @@ -562,15 +595,19 @@ func TestContext2Validate_orphans(t *testing.T) { func TestContext2Validate_providerConfig_bad(t *testing.T) { m := testModule(t, "validate-bad-pc") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + p.GetSchemaResponse = &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: 
map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, - ResourceTypes: map[string]*configschema.Block{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + }, }, }, } @@ -582,7 +619,7 @@ func TestContext2Validate_providerConfig_bad(t *testing.T) { }, }) - p.PrepareProviderConfigResponse = providers.PrepareProviderConfigResponse{ + p.PrepareProviderConfigResponse = &providers.PrepareProviderConfigResponse{ Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("bad")), } @@ -595,18 +632,22 @@ func TestContext2Validate_providerConfig_bad(t *testing.T) { } } -func TestContext2Validate_providerConfig_badEmpty(t *testing.T) { - m := testModule(t, "validate-bad-pc-empty") +func TestContext2Validate_providerConfig_skippedEmpty(t *testing.T) { + m := testModule(t, "validate-skipped-pc-empty") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + p.GetSchemaResponse = &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, - ResourceTypes: map[string]*configschema.Block{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + }, }, }, } @@ -618,28 +659,32 @@ func TestContext2Validate_providerConfig_badEmpty(t *testing.T) { }, }) - p.PrepareProviderConfigResponse = providers.PrepareProviderConfigResponse{ - Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("bad")), + p.PrepareProviderConfigResponse = &providers.PrepareProviderConfigResponse{ + Diagnostics: 
tfdiags.Diagnostics{}.Append(fmt.Errorf("should not be called")), } diags := c.Validate() - if !diags.HasErrors() { - t.Fatalf("succeeded; want error") + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) } } func TestContext2Validate_providerConfig_good(t *testing.T) { m := testModule(t, "validate-bad-pc") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + p.GetSchemaResponse = &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, - ResourceTypes: map[string]*configschema.Block{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + }, }, }, } @@ -663,15 +708,19 @@ func TestContext2Validate_requiredProviderConfig(t *testing.T) { m := testModule(t, "validate-required-provider-config") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "required_attribute": {Type: cty.String, Required: true}, + p.GetSchemaResponse = &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "required_attribute": {Type: cty.String, Required: true}, + }, }, }, - ResourceTypes: map[string]*configschema.Block{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + }, }, }, } @@ -692,11 +741,13 @@ func TestContext2Validate_requiredProviderConfig(t *testing.T) { func TestContext2Validate_provisionerConfig_bad(t *testing.T) { m 
:= testModule(t, "validate-bad-prov-conf") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -709,12 +760,12 @@ func TestContext2Validate_provisionerConfig_bad(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) - p.PrepareProviderConfigResponse = providers.PrepareProviderConfigResponse{ + p.PrepareProviderConfigResponse = &providers.PrepareProviderConfigResponse{ Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("bad")), } @@ -727,11 +778,13 @@ func TestContext2Validate_provisionerConfig_bad(t *testing.T) { func TestContext2Validate_badResourceConnection(t *testing.T) { m := testModule(t, "validate-bad-resource-connection") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -744,7 +797,7 @@ func TestContext2Validate_badResourceConnection(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: 
map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -759,11 +812,13 @@ func TestContext2Validate_badResourceConnection(t *testing.T) { func TestContext2Validate_badProvisionerConnection(t *testing.T) { m := testModule(t, "validate-bad-prov-connection") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -776,7 +831,7 @@ func TestContext2Validate_badProvisionerConnection(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -791,26 +846,30 @@ func TestContext2Validate_badProvisionerConnection(t *testing.T) { func TestContext2Validate_provisionerConfig_good(t *testing.T) { m := testModule(t, "validate-bad-prov-conf") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": { + p.GetSchemaResponse = &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ Attributes: map[string]*configschema.Attribute{ "foo": {Type: cty.String, Optional: true}, }, }, }, + ResourceTypes: map[string]providers.Schema{ + "aws_instance": { + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + }, + }, + }, } pr := 
simpleMockProvisioner() pr.ValidateProvisionerConfigFn = func(req provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse { var diags tfdiags.Diagnostics if req.Config.GetAttr("test_string").IsNull() { - diags.Append(errors.New("test_string is not set")) + diags = diags.Append(errors.New("test_string is not set")) } return provisioners.ValidateProvisionerConfigResponse{ Diagnostics: diags, @@ -822,7 +881,7 @@ func TestContext2Validate_provisionerConfig_good(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, }) @@ -836,16 +895,17 @@ func TestContext2Validate_provisionerConfig_good(t *testing.T) { func TestContext2Validate_requiredVar(t *testing.T) { m := testModule(t, "validate-required-var") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "ami": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "ami": {Type: cty.String, Optional: true}, + }, }, }, }, } - _, diags := NewContext(&ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -861,16 +921,17 @@ func TestContext2Validate_requiredVar(t *testing.T) { func TestContext2Validate_resourceConfig_bad(t *testing.T) { m := testModule(t, "validate-bad-rc") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: 
cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, }, } - c := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -878,7 +939,7 @@ func TestContext2Validate_resourceConfig_bad(t *testing.T) { }, }) - p.ValidateResourceTypeConfigResponse = providers.ValidateResourceTypeConfigResponse{ + p.ValidateResourceTypeConfigResponse = &providers.ValidateResourceTypeConfigResponse{ Diagnostics: tfdiags.Diagnostics{}.Append(fmt.Errorf("bad")), } @@ -891,16 +952,17 @@ func TestContext2Validate_resourceConfig_bad(t *testing.T) { func TestContext2Validate_resourceConfig_good(t *testing.T) { m := testModule(t, "validate-bad-rc") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, }, } - c := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -916,12 +978,14 @@ func TestContext2Validate_resourceConfig_good(t *testing.T) { func TestContext2Validate_tainted(t *testing.T) { p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - "num": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + "num": {Type: cty.String, 
Optional: true}, + }, }, }, }, @@ -943,7 +1007,7 @@ func TestContext2Validate_tainted(t *testing.T) { p.ValidateResourceTypeConfigFn = func(req providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { var diags tfdiags.Diagnostics if req.Config.GetAttr("foo").IsNull() { - diags.Append(errors.New("foo is not set")) + diags = diags.Append(errors.New("foo is not set")) } return providers.ValidateResourceTypeConfigResponse{ Diagnostics: diags, @@ -960,14 +1024,14 @@ func TestContext2Validate_targetedDestroy(t *testing.T) { m := testModule(t, "validate-targeted") p := testProvider("aws") pr := simpleMockProvisioner() - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - "num": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + "num": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -983,7 +1047,7 @@ func TestContext2Validate_targetedDestroy(t *testing.T) { Providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), }, - Provisioners: map[string]ProvisionerFactory{ + Provisioners: map[string]provisioners.Factory{ "shell": testProvisionerFuncFixed(pr), }, State: state, @@ -1004,11 +1068,13 @@ func TestContext2Validate_targetedDestroy(t *testing.T) { func TestContext2Validate_varRefUnknown(t *testing.T) { m := testModule(t, "validate-variable-ref") p := testProvider("aws") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ 
"aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -1049,13 +1115,13 @@ func TestContext2Validate_interpolateVar(t *testing.T) { m := testModule(t, "input-interpolate-var") p := testProvider("null") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "template_file": { - Attributes: map[string]*configschema.Attribute{ - "template": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "template": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -1082,13 +1148,13 @@ func TestContext2Validate_interpolateComputedModuleVarDef(t *testing.T) { m := testModule(t, "validate-computed-module-var-ref") p := testProvider("aws") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "aws_instance": { - Attributes: map[string]*configschema.Attribute{ - "attr": {Type: cty.String, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "attr": {Type: cty.String, Optional: true}, + }, }, }, }, @@ -1114,8 +1180,6 @@ func TestContext2Validate_interpolateMap(t *testing.T) { m := testModule(t, "issue-9549") p := testProvider("template") - p.ApplyResourceChangeFn = testApplyFn - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, @@ -1131,6 +1195,71 @@ func TestContext2Validate_interpolateMap(t *testing.T) { } } +func 
TestContext2Validate_varSensitive(t *testing.T) { + // Smoke test through validate where a variable has sensitive applied + m := testModuleInline(t, map[string]string{ + "main.tf": ` +variable "foo" { + default = "xyz" + sensitive = true +} + +variable "bar" { + sensitive = true +} + +data "aws_data_source" "bar" { + foo = var.bar +} + +resource "aws_instance" "foo" { + foo = var.foo +} +`, + }) + + p := testProvider("aws") + p.ValidateResourceTypeConfigFn = func(req providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { + // Providers receive unmarked values + if got, want := req.Config.GetAttr("foo"), cty.UnknownVal(cty.String); !got.RawEquals(want) { + t.Fatalf("wrong value for foo\ngot: %#v\nwant: %#v", got, want) + } + return providers.ValidateResourceTypeConfigResponse{} + } + p.ValidateDataSourceConfigFn = func(req providers.ValidateDataSourceConfigRequest) (resp providers.ValidateDataSourceConfigResponse) { + if got, want := req.Config.GetAttr("foo"), cty.UnknownVal(cty.String); !got.RawEquals(want) { + t.Fatalf("wrong value for foo\ngot: %#v\nwant: %#v", got, want) + } + return providers.ValidateDataSourceConfigResponse{} + } + + ctx := testContext2(t, &ContextOpts{ + Config: m, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + Variables: InputValues{ + "bar": &InputValue{ + Value: cty.StringVal("boop"), + SourceType: ValueFromCaller, + }, + }, + }) + + diags := ctx.Validate() + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + if !p.ValidateResourceTypeConfigCalled { + t.Fatal("expected ValidateResourceTypeConfigFn to be called") + } + + if !p.ValidateDataSourceConfigCalled { + t.Fatal("expected ValidateDataSourceConfigFn to be called") + } +} + // Manually validate using the new PlanGraphBuilder func TestContext2Validate_PlanGraphBuilder(t *testing.T) { fixture := contextFixtureApplyVars(t) @@ -1208,7 +1337,7 @@ output "out" { } // Should get 
this error: // Unsupported attribute: This object does not have an attribute named "missing" - if got, want := diags.Err().Error(), "Unsupported attribute"; strings.Index(got, want) == -1 { + if got, want := diags.Err().Error(), "Unsupported attribute"; !strings.Contains(got, want) { t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) } } @@ -1245,7 +1374,47 @@ resource "aws_instance" "foo" { } // Should get this error: // Unsupported attribute: This object does not have an attribute named "missing" - if got, want := diags.Err().Error(), "Unsupported attribute"; strings.Index(got, want) == -1 { + if got, want := diags.Err().Error(), "Unsupported attribute"; !strings.Contains(got, want) { + t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) + } +} + +func TestContext2Validate_invalidSensitiveModuleOutput(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "child/main.tf": ` +variable "foo" { + default = "xyz" + sensitive = true +} + +output "out" { + value = var.foo +}`, + "main.tf": ` +module "child" { + source = "./child" +} + +resource "aws_instance" "foo" { + foo = module.child.out +}`, + }) + + p := testProvider("aws") + ctx := testContext2(t, &ContextOpts{ + Config: m, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p), + }, + }) + + diags := ctx.Validate() + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + // Should get this error: + // Output refers to sensitive values: Expressions used in outputs can only refer to sensitive values if the sensitive attribute is true. + if got, want := diags.Err().Error(), "Output refers to sensitive values"; !strings.Contains(got, want) { t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) } } @@ -1274,7 +1443,7 @@ output "out" { } // Should get this error: // Invalid resource count attribute: The special "count" attribute is no longer supported after Terraform v0.12. 
Instead, use length(aws_instance.test) to count resource instances. - if got, want := diags.Err().Error(), "Invalid resource count attribute:"; strings.Index(got, want) == -1 { + if got, want := diags.Err().Error(), "Invalid resource count attribute:"; !strings.Contains(got, want) { t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) } } @@ -1305,7 +1474,7 @@ output "out" { } // Should get this error: // Reference to undeclared module: No module call named "foo" is declared in the root module. - if got, want := diags.Err().Error(), "Reference to undeclared module:"; strings.Index(got, want) == -1 { + if got, want := diags.Err().Error(), "Reference to undeclared module:"; !strings.Contains(got, want) { t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) } } @@ -1336,7 +1505,7 @@ output "out" { } // Should get this error: // Reference to undeclared module: No module call named "foo" is declared in the root module. - if got, want := diags.Err().Error(), "Reference to undeclared module:"; strings.Index(got, want) == -1 { + if got, want := diags.Err().Error(), "Reference to undeclared module:"; !strings.Contains(got, want) { t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) } } @@ -1366,7 +1535,7 @@ resource "test_instance" "bar" { } // Should get this error: // Reference to undeclared module: No module call named "foo" is declared in the root module. - if got, want := diags.Err().Error(), "Reference to undeclared resource:"; strings.Index(got, want) == -1 { + if got, want := diags.Err().Error(), "Reference to undeclared resource:"; !strings.Contains(got, want) { t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) } } @@ -1399,7 +1568,7 @@ resource "test_instance" "bar" { } // Should get this error: // Reference to undeclared module: No module call named "foo" is declared in the root module. 
- if got, want := diags.Err().Error(), `no argument, nested block, or exported attribute named "does_not_exist_in_schema"`; strings.Index(got, want) == -1 { + if got, want := diags.Err().Error(), `no argument, nested block, or exported attribute named "does_not_exist_in_schema"`; !strings.Contains(got, want) { t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) } } @@ -1422,7 +1591,7 @@ func TestContext2Validate_variableCustomValidationsFail(t *testing.T) { if !diags.HasErrors() { t.Fatal("succeeded; want errors") } - if got, want := diags.Err().Error(), `Invalid value for variable: Value must not be "nope".`; strings.Index(got, want) == -1 { + if got, want := diags.Err().Error(), `Invalid value for variable: Value must not be "nope".`; !strings.Contains(got, want) { t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) } } @@ -1514,7 +1683,6 @@ resource "aws_instance" "foo" { }) p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -1543,7 +1711,6 @@ resource "aws_instance" "foo" { }) p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -1555,7 +1722,7 @@ resource "aws_instance" "foo" { if !diags.HasErrors() { t.Fatal("succeeded; want errors") } - if got, want := diags.Err().Error(), `Invalid count argument`; strings.Index(got, want) == -1 { + if got, want := diags.Err().Error(), `Invalid count argument`; !strings.Contains(got, want) { t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) } } @@ -1575,7 +1742,6 @@ resource "aws_instance" "foo" { }) p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -1587,7 +1753,7 @@ resource "aws_instance" "foo" { if !diags.HasErrors() { 
t.Fatal("succeeded; want errors") } - if got, want := diags.Err().Error(), `Invalid for_each argument`; strings.Index(got, want) == -1 { + if got, want := diags.Err().Error(), `Invalid for_each argument`; !strings.Contains(got, want) { t.Fatalf("wrong error:\ngot: %s\nwant: message containing %q", got, want) } } @@ -1658,7 +1824,6 @@ output "out" { }) p := testProvider("aws") - p.PlanResourceChangeFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -1752,31 +1917,32 @@ output "out" { } } -func TestContext2Validate_invalidIgnoreChanges(t *testing.T) { +func TestContext2Validate_rpcDiagnostics(t *testing.T) { // validate module and output depends_on m := testModuleInline(t, map[string]string{ "main.tf": ` resource "test_instance" "a" { - lifecycle { - ignore_changes = [foo] - } } - `, }) p := testProvider("test") - p.GetSchemaReturn = &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ + p.GetSchemaResponse = &providers.GetSchemaResponse{ + ResourceTypes: map[string]providers.Schema{ "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "foo": {Type: cty.String, Computed: true, Optional: true}, + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, }, }, }, } + p.ValidateResourceTypeConfigResponse = &providers.ValidateResourceTypeConfigResponse{ + Diagnostics: tfdiags.Diagnostics(nil).Append(tfdiags.SimpleWarning("don't frobble")), + } + ctx := testContext2(t, &ContextOpts{ Config: m, Providers: map[addrs.Provider]providers.Factory{ @@ -1784,14 +1950,18 @@ resource "test_instance" "a" { }, }) diags := ctx.Validate() - if !diags.HasErrors() { - t.Fatal("succeeded; want errors") + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + if len(diags) == 0 { + t.Fatal("expected warnings") } for _, d := range diags { des := d.Description().Summary - if 
!strings.Contains(des, "Cannot ignore") { - t.Fatalf(`expected "Invalid depends_on reference", got %q`, des) + if !strings.Contains(des, "frobble") { + t.Fatalf(`expected frobble, got %q`, des) } } } diff --git a/terraform/eval_apply.go b/terraform/eval_apply.go deleted file mode 100644 index 67e559484..000000000 --- a/terraform/eval_apply.go +++ /dev/null @@ -1,793 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "reflect" - "strings" - - multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/plans/objchange" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalApply is an EvalNode implementation that writes the diff to -// the full diff. 
-type EvalApply struct { - Addr addrs.ResourceInstance - Config *configs.Resource - State **states.ResourceInstanceObject - Change **plans.ResourceInstanceChange - ProviderAddr addrs.AbsProviderConfig - Provider *providers.Interface - ProviderMetas map[addrs.Provider]*configs.ProviderMeta - ProviderSchema **ProviderSchema - Output **states.ResourceInstanceObject - CreateNew *bool - Error *error - CreateBeforeDestroy bool -} - -// TODO: test -func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - - change := *n.Change - provider := *n.Provider - state := *n.State - absAddr := n.Addr.Absolute(ctx.Path()) - - if state == nil { - state = &states.ResourceInstanceObject{} - } - - schema, _ := (*n.ProviderSchema).SchemaForResourceType(n.Addr.Resource.Mode, n.Addr.Resource.Type) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - if n.CreateNew != nil { - *n.CreateNew = (change.Action == plans.Create || change.Action.IsReplace()) - } - - configVal := cty.NullVal(cty.DynamicPseudoType) - if n.Config != nil { - var configDiags tfdiags.Diagnostics - forEach, _ := evaluateForEachExpression(n.Config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) - configVal, _, configDiags = ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - } - - if !configVal.IsWhollyKnown() { - return nil, fmt.Errorf( - "configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)", - absAddr, - ) - } - - metaConfigVal := cty.NullVal(cty.DynamicPseudoType) - if n.ProviderMetas != nil { - log.Printf("[DEBUG] EvalApply: ProviderMeta config value set") - if m, ok := n.ProviderMetas[n.ProviderAddr.Provider]; ok && m != nil { - // if the 
provider doesn't support this feature, throw an error - if (*n.ProviderSchema).ProviderMeta == nil { - log.Printf("[DEBUG] EvalApply: no ProviderMeta schema") - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Provider %s doesn't support provider_meta", n.ProviderAddr.Provider.String()), - Detail: fmt.Sprintf("The resource %s belongs to a provider that doesn't support provider_meta blocks", n.Addr), - Subject: &m.ProviderRange, - }) - } else { - log.Printf("[DEBUG] EvalApply: ProviderMeta schema found") - var configDiags tfdiags.Diagnostics - metaConfigVal, _, configDiags = ctx.EvaluateBlock(m.Config, (*n.ProviderSchema).ProviderMeta, nil, EvalDataForNoInstanceKey) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - } - } - } - - log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr.Absolute(ctx.Path()), change.Action) - - // If our config, Before or After value contain any marked values, - // ensure those are stripped out before sending - // this to the provider - unmarkedConfigVal, _ := configVal.UnmarkDeep() - unmarkedBefore, beforePaths := change.Before.UnmarkDeepWithPaths() - unmarkedAfter, afterPaths := change.After.UnmarkDeepWithPaths() - - // If we have an Update action, our before and after values are equal, - // and only differ on their sensitivity, the newVal is the after val - // and we should not communicate with the provider or perform further action. 
- eqV := unmarkedBefore.Equals(unmarkedAfter) - eq := eqV.IsKnown() && eqV.True() - if change.Action == plans.Update && eq && !reflect.DeepEqual(beforePaths, afterPaths) { - return nil, diags.ErrWithWarnings() - } - - resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{ - TypeName: n.Addr.Resource.Type, - PriorState: unmarkedBefore, - Config: unmarkedConfigVal, - PlannedState: unmarkedAfter, - PlannedPrivate: change.Private, - ProviderMeta: metaConfigVal, - }) - applyDiags := resp.Diagnostics - if n.Config != nil { - applyDiags = applyDiags.InConfigBody(n.Config.Config) - } - diags = diags.Append(applyDiags) - - // Even if there are errors in the returned diagnostics, the provider may - // have returned a _partial_ state for an object that already exists but - // failed to fully configure, and so the remaining code must always run - // to completion but must be defensive against the new value being - // incomplete. - newVal := resp.NewState - - // If we have paths to mark, mark those on this new value - if len(afterPaths) > 0 { - newVal = newVal.MarkWithPaths(afterPaths) - } - - if newVal == cty.NilVal { - // Providers are supposed to return a partial new value even when errors - // occur, but sometimes they don't and so in that case we'll patch that up - // by just using the prior state, so we'll at least keep track of the - // object for the user to retry. - newVal = change.Before - - // As a special case, we'll set the new value to null if it looks like - // we were trying to execute a delete, because the provider in this case - // probably left the newVal unset intending it to be interpreted as "null". 
- if change.After.IsNull() { - newVal = cty.NullVal(schema.ImpliedType()) - } - - // Ideally we'd produce an error or warning here if newVal is nil and - // there are no errors in diags, because that indicates a buggy - // provider not properly reporting its result, but unfortunately many - // of our historical test mocks behave in this way and so producing - // a diagnostic here fails hundreds of tests. Instead, we must just - // silently retain the old value for now. Returning a nil value with - // no errors is still always considered a bug in the provider though, - // and should be fixed for any "real" providers that do it. - } - - var conformDiags tfdiags.Diagnostics - for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { - conformDiags = conformDiags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced an invalid value after apply for %s. The result cannot not be saved in the Terraform state.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.Provider.String(), tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - diags = diags.Append(conformDiags) - if conformDiags.HasErrors() { - // Bail early in this particular case, because an object that doesn't - // conform to the schema can't be saved in the state anyway -- the - // serializer will reject it. - return nil, diags.Err() - } - - // After this point we have a type-conforming result object and so we - // must always run to completion to ensure it can be saved. If n.Error - // is set then we must not return a non-nil error, in order to allow - // evaluation to continue to a later point where our state object will - // be saved. - - // By this point there must not be any unknown values remaining in our - // object, because we've applied the change and we can't save unknowns - // in our persistent state. 
If any are present then we will indicate an - // error (which is always a bug in the provider) but we will also replace - // them with nulls so that we can successfully save the portions of the - // returned value that are known. - if !newVal.IsWhollyKnown() { - // To generate better error messages, we'll go for a walk through the - // value and make a separate diagnostic for each unknown value we - // find. - cty.Walk(newVal, func(path cty.Path, val cty.Value) (bool, error) { - if !val.IsKnown() { - pathStr := tfdiags.FormatCtyPath(path) - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider returned invalid result object after apply", - fmt.Sprintf( - "After the apply operation, the provider still indicated an unknown value for %s%s. All values must be known after apply, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save the other known object values in the state.", - n.Addr.Absolute(ctx.Path()), pathStr, - ), - )) - } - return true, nil - }) - - // NOTE: This operation can potentially be lossy if there are multiple - // elements in a set that differ only by unknown values: after - // replacing with null these will be merged together into a single set - // element. Since we can only get here in the presence of a provider - // bug, we accept this because storing a result here is always a - // best-effort sort of thing. - newVal = cty.UnknownAsNull(newVal) - } - - if change.Action != plans.Delete && !diags.HasErrors() { - // Only values that were marked as unknown in the planned value are allowed - // to change during the apply operation. (We do this after the unknown-ness - // check above so that we also catch anything that became unknown after - // being known during plan.) 
- // - // If we are returning other errors anyway then we'll give this - // a pass since the other errors are usually the explanation for - // this one and so it's more helpful to let the user focus on the - // root cause rather than distract with this extra problem. - if errs := objchange.AssertObjectCompatible(schema, change.After, newVal); len(errs) > 0 { - if resp.LegacyTypeSystem { - // The shimming of the old type system in the legacy SDK is not precise - // enough to pass this consistency check, so we'll give it a pass here, - // but we will generate a warning about it so that we are more likely - // to notice in the logs if an inconsistency beyond the type system - // leads to a downstream provider failure. - var buf strings.Builder - fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.Provider.String(), absAddr) - for _, err := range errs { - fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) - } - log.Print(buf.String()) - - // The sort of inconsistency we won't catch here is if a known value - // in the plan is changed during apply. That can cause downstream - // problems because a dependent resource would make its own plan based - // on the planned value, and thus get a different result during the - // apply phase. This will usually lead to a "Provider produced invalid plan" - // error that incorrectly blames the downstream resource for the change. 
- - } else { - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent result after apply", - fmt.Sprintf( - "When applying changes to %s, provider %q produced an unexpected new value: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - absAddr, n.ProviderAddr.Provider.String(), tfdiags.FormatError(err), - ), - )) - } - } - } - } - - // If a provider returns a null or non-null object at the wrong time then - // we still want to save that but it often causes some confusing behaviors - // where it seems like Terraform is failing to take any action at all, - // so we'll generate some errors to draw attention to it. - if !diags.HasErrors() { - if change.Action == plans.Delete && !newVal.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider returned invalid result object after apply", - fmt.Sprintf( - "After applying a %s plan, the provider returned a non-null object for %s. Destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save this errant object in the state for debugging and recovery.", - change.Action, n.Addr.Absolute(ctx.Path()), - ), - )) - } - if change.Action != plans.Delete && newVal.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider returned invalid result object after apply", - fmt.Sprintf( - "After applying a %s plan, the provider returned a null object for %s. 
Only destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository.", - change.Action, n.Addr.Absolute(ctx.Path()), - ), - )) - } - } - - newStatus := states.ObjectReady - - // Sometimes providers return a null value when an operation fails for some - // reason, but we'd rather keep the prior state so that the error can be - // corrected on a subsequent run. We must only do this for null new value - // though, or else we may discard partial updates the provider was able to - // complete. - if diags.HasErrors() && newVal.IsNull() { - // Otherwise, we'll continue but using the prior state as the new value, - // making this effectively a no-op. If the item really _has_ been - // deleted then our next refresh will detect that and fix it up. - // If change.Action is Create then change.Before will also be null, - // which is fine. - newVal = change.Before - - // If we're recovering the previous state, we also want to restore the - // the tainted status of the object. - if state.Status == states.ObjectTainted { - newStatus = states.ObjectTainted - } - } - - var newState *states.ResourceInstanceObject - if !newVal.IsNull() { // null value indicates that the object is deleted, so we won't set a new state in that case - newState = &states.ResourceInstanceObject{ - Status: newStatus, - Value: newVal, - Private: resp.Private, - CreateBeforeDestroy: n.CreateBeforeDestroy, - } - } - - // Write the final state - if n.Output != nil { - *n.Output = newState - } - - if diags.HasErrors() { - // If the caller provided an error pointer then they are expected to - // handle the error some other way and we treat our own result as - // success. 
- if n.Error != nil { - err := diags.Err() - *n.Error = err - log.Printf("[DEBUG] %s: apply errored, but we're indicating that via the Error pointer rather than returning it: %s", n.Addr.Absolute(ctx.Path()), err) - return nil, nil - } - } - - // we have to drop warning-only diagnostics for now - if diags.HasErrors() { - return nil, diags.ErrWithWarnings() - } - - // log any warnings since we can't return them - if e := diags.ErrWithWarnings(); e != nil { - log.Printf("[WARN] EvalApply %s: %v", n.Addr, e) - } - - return nil, nil -} - -// EvalApplyPre is an EvalNode implementation that does the pre-Apply work -type EvalApplyPre struct { - Addr addrs.ResourceInstance - Gen states.Generation - State **states.ResourceInstanceObject - Change **plans.ResourceInstanceChange -} - -// TODO: test -func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) { - change := *n.Change - absAddr := n.Addr.Absolute(ctx.Path()) - - if change == nil { - panic(fmt.Sprintf("EvalApplyPre for %s called with nil Change", absAddr)) - } - - if resourceHasUserVisibleApply(n.Addr) { - priorState := change.Before - plannedNewState := change.After - - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreApply(absAddr, n.Gen, change.Action, priorState, plannedNewState) - }) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -// EvalApplyPost is an EvalNode implementation that does the post-Apply work -type EvalApplyPost struct { - Addr addrs.ResourceInstance - Gen states.Generation - State **states.ResourceInstanceObject - Error *error -} - -// TODO: test -func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - - if resourceHasUserVisibleApply(n.Addr) { - absAddr := n.Addr.Absolute(ctx.Path()) - var newState cty.Value - if state != nil { - newState = state.Value - } else { - newState = cty.NullVal(cty.DynamicPseudoType) - } - var err error - if n.Error != nil { - err = *n.Error - } - - hookErr := ctx.Hook(func(h Hook) 
(HookAction, error) { - return h.PostApply(absAddr, n.Gen, newState, err) - }) - if hookErr != nil { - return nil, hookErr - } - } - - return nil, *n.Error -} - -// EvalMaybeTainted is an EvalNode that takes the planned change, new value, -// and possible error from an apply operation and produces a new instance -// object marked as tainted if it appears that a create operation has failed. -// -// This EvalNode never returns an error, to ensure that a subsequent EvalNode -// can still record the possibly-tainted object in the state. -type EvalMaybeTainted struct { - Addr addrs.ResourceInstance - Gen states.Generation - Change **plans.ResourceInstanceChange - State **states.ResourceInstanceObject - Error *error -} - -func (n *EvalMaybeTainted) Eval(ctx EvalContext) (interface{}, error) { - if n.State == nil || n.Change == nil || n.Error == nil { - return nil, nil - } - - state := *n.State - change := *n.Change - err := *n.Error - - // nothing to do if everything went as planned - if err == nil { - return nil, nil - } - - if state != nil && state.Status == states.ObjectTainted { - log.Printf("[TRACE] EvalMaybeTainted: %s was already tainted, so nothing to do", n.Addr.Absolute(ctx.Path())) - return nil, nil - } - - if change.Action == plans.Create { - // If there are errors during a _create_ then the object is - // in an undefined state, and so we'll mark it as tainted so - // we can try again on the next run. - // - // We don't do this for other change actions because errors - // during updates will often not change the remote object at all. - // If there _were_ changes prior to the error, it's the provider's - // responsibility to record the effect of those changes in the - // object value it returned. 
- log.Printf("[TRACE] EvalMaybeTainted: %s encountered an error during creation, so it is now marked as tainted", n.Addr.Absolute(ctx.Path())) - *n.State = state.AsTainted() - } - - return nil, nil -} - -// resourceHasUserVisibleApply returns true if the given resource is one where -// apply actions should be exposed to the user. -// -// Certain resources do apply actions only as an implementation detail, so -// these should not be advertised to code outside of this package. -func resourceHasUserVisibleApply(addr addrs.ResourceInstance) bool { - // Only managed resources have user-visible apply actions. - // In particular, this excludes data resources since we "apply" these - // only as an implementation detail of removing them from state when - // they are destroyed. (When reading, they don't get here at all because - // we present them as "Refresh" actions.) - return addr.ContainingResource().Mode == addrs.ManagedResourceMode -} - -// EvalApplyProvisioners is an EvalNode implementation that executes -// the provisioners for a resource. -// -// TODO(mitchellh): This should probably be split up into a more fine-grained -// ApplyProvisioner (single) that is looped over. 
-type EvalApplyProvisioners struct { - Addr addrs.ResourceInstance - State **states.ResourceInstanceObject - ResourceConfig *configs.Resource - CreateNew *bool - Error *error - - // When is the type of provisioner to run at this point - When configs.ProvisionerWhen -} - -// TODO: test -func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - state := *n.State - if state == nil { - log.Printf("[TRACE] EvalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr) - return nil, nil - } - if n.When == configs.ProvisionerWhenCreate && n.CreateNew != nil && !*n.CreateNew { - // If we're not creating a new resource, then don't run provisioners - log.Printf("[TRACE] EvalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr) - return nil, nil - } - if state.Status == states.ObjectTainted { - // No point in provisioning an object that is already tainted, since - // it's going to get recreated on the next apply anyway. - log.Printf("[TRACE] EvalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr) - return nil, nil - } - - provs := n.filterProvisioners() - if len(provs) == 0 { - // We have no provisioners, so don't do anything - return nil, nil - } - - if n.Error != nil && *n.Error != nil { - // We're already tainted, so just return out - return nil, nil - } - - { - // Call pre hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreProvisionInstance(absAddr, state.Value) - }) - if err != nil { - return nil, err - } - } - - // If there are no errors, then we append it to our output error - // if we have one, otherwise we just output it. 
- err := n.apply(ctx, provs) - if err != nil { - *n.Error = multierror.Append(*n.Error, err) - if n.Error == nil { - return nil, err - } else { - log.Printf("[TRACE] EvalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", absAddr) - return nil, nil - } - } - - { - // Call post hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostProvisionInstance(absAddr, state.Value) - }) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -// filterProvisioners filters the provisioners on the resource to only -// the provisioners specified by the "when" option. -func (n *EvalApplyProvisioners) filterProvisioners() []*configs.Provisioner { - // Fast path the zero case - if n.ResourceConfig == nil || n.ResourceConfig.Managed == nil { - return nil - } - - if len(n.ResourceConfig.Managed.Provisioners) == 0 { - return nil - } - - result := make([]*configs.Provisioner, 0, len(n.ResourceConfig.Managed.Provisioners)) - for _, p := range n.ResourceConfig.Managed.Provisioners { - if p.When == n.When { - result = append(result, p) - } - } - - return result -} - -func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisioner) error { - var diags tfdiags.Diagnostics - instanceAddr := n.Addr - absAddr := instanceAddr.Absolute(ctx.Path()) - - // this self is only used for destroy provisioner evaluation, and must - // refer to the last known value of the resource. - self := (*n.State).Value - - var evalScope func(EvalContext, hcl.Body, cty.Value, *configschema.Block) (cty.Value, tfdiags.Diagnostics) - switch n.When { - case configs.ProvisionerWhenDestroy: - evalScope = n.evalDestroyProvisionerConfig - default: - evalScope = n.evalProvisionerConfig - } - - // If there's a connection block defined directly inside the resource block - // then it'll serve as a base connection configuration for all of the - // provisioners. 
- var baseConn hcl.Body - if n.ResourceConfig.Managed != nil && n.ResourceConfig.Managed.Connection != nil { - baseConn = n.ResourceConfig.Managed.Connection.Config - } - - for _, prov := range provs { - log.Printf("[TRACE] EvalApplyProvisioners: provisioning %s with %q", absAddr, prov.Type) - - // Get the provisioner - provisioner := ctx.Provisioner(prov.Type) - schema := ctx.ProvisionerSchema(prov.Type) - - config, configDiags := evalScope(ctx, prov.Config, self, schema) - diags = diags.Append(configDiags) - if diags.HasErrors() { - return diags.Err() - } - - // If the provisioner block contains a connection block of its own then - // it can override the base connection configuration, if any. - var localConn hcl.Body - if prov.Connection != nil { - localConn = prov.Connection.Config - } - - var connBody hcl.Body - switch { - case baseConn != nil && localConn != nil: - // Our standard merging logic applies here, similar to what we do - // with _override.tf configuration files: arguments from the - // base connection block will be masked by any arguments of the - // same name in the local connection block. 
- connBody = configs.MergeBodies(baseConn, localConn) - case baseConn != nil: - connBody = baseConn - case localConn != nil: - connBody = localConn - } - - // start with an empty connInfo - connInfo := cty.NullVal(connectionBlockSupersetSchema.ImpliedType()) - - if connBody != nil { - var connInfoDiags tfdiags.Diagnostics - connInfo, connInfoDiags = evalScope(ctx, connBody, self, connectionBlockSupersetSchema) - diags = diags.Append(connInfoDiags) - if diags.HasErrors() { - return diags.Err() - } - } - - { - // Call pre hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreProvisionInstanceStep(absAddr, prov.Type) - }) - if err != nil { - return err - } - } - - // The output function - outputFn := func(msg string) { - ctx.Hook(func(h Hook) (HookAction, error) { - h.ProvisionOutput(absAddr, prov.Type, msg) - return HookActionContinue, nil - }) - } - - // If our config or connection info contains any marked values, ensure - // those are stripped out before sending to the provisioner. Unlike - // resources, we have no need to capture the marked paths and reapply - // later. - unmarkedConfig, configMarks := config.UnmarkDeep() - unmarkedConnInfo, _ := connInfo.UnmarkDeep() - - // Marks on the config might result in leaking sensitive values through - // provisioner logging, so we conservatively suppress all output in - // this case. This should not apply to connection info values, which - // provisioners ought not to be logging anyway. 
- if len(configMarks) > 0 { - outputFn = func(msg string) { - ctx.Hook(func(h Hook) (HookAction, error) { - h.ProvisionOutput(absAddr, prov.Type, "(output suppressed due to sensitive value in config)") - return HookActionContinue, nil - }) - } - } - - output := CallbackUIOutput{OutputFn: outputFn} - resp := provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{ - Config: unmarkedConfig, - Connection: unmarkedConnInfo, - UIOutput: &output, - }) - applyDiags := resp.Diagnostics.InConfigBody(prov.Config) - - // Call post hook - hookErr := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostProvisionInstanceStep(absAddr, prov.Type, applyDiags.Err()) - }) - - switch prov.OnFailure { - case configs.ProvisionerOnFailureContinue: - if applyDiags.HasErrors() { - log.Printf("[WARN] Errors while provisioning %s with %q, but continuing as requested in configuration", n.Addr, prov.Type) - } else { - // Maybe there are warnings that we still want to see - diags = diags.Append(applyDiags) - } - default: - diags = diags.Append(applyDiags) - if applyDiags.HasErrors() { - log.Printf("[WARN] Errors while provisioning %s with %q, so aborting", n.Addr, prov.Type) - return diags.Err() - } - } - - // Deal with the hook - if hookErr != nil { - return hookErr - } - } - - // we have to drop warning-only diagnostics for now - if diags.HasErrors() { - return diags.ErrWithWarnings() - } - - // log any warnings since we can't return them - if e := diags.ErrWithWarnings(); e != nil { - log.Printf("[WARN] EvalApplyProvisioners %s: %v", n.Addr, e) - } - - return nil -} - -func (n *EvalApplyProvisioners) evalProvisionerConfig(ctx EvalContext, body hcl.Body, self cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - forEach, forEachDiags := evaluateForEachExpression(n.ResourceConfig.ForEach, ctx) - diags = diags.Append(forEachDiags) - - keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) - - config, _, configDiags := 
ctx.EvaluateBlock(body, schema, n.Addr, keyData) - diags = diags.Append(configDiags) - - return config, diags -} - -// during destroy a provisioner can only evaluate within the scope of the parent resource -func (n *EvalApplyProvisioners) evalDestroyProvisionerConfig(ctx EvalContext, body hcl.Body, self cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - - // For a destroy-time provisioner forEach is intentionally nil here, - // which EvalDataForInstanceKey responds to by not populating EachValue - // in its result. That's okay because each.value is prohibited for - // destroy-time provisioners. - keyData := EvalDataForInstanceKey(n.Addr.Key, nil) - - evalScope := ctx.EvaluationScope(n.Addr, keyData) - config, evalDiags := evalScope.EvalSelfBlock(body, self, schema, keyData) - diags = diags.Append(evalDiags) - - return config, diags -} diff --git a/terraform/eval_context.go b/terraform/eval_context.go index 6c8a5d9d1..49cac746a 100644 --- a/terraform/eval_context.go +++ b/terraform/eval_context.go @@ -77,22 +77,16 @@ type EvalContext interface { ProviderInput(addrs.AbsProviderConfig) map[string]cty.Value SetProviderInput(addrs.AbsProviderConfig, map[string]cty.Value) - // InitProvisioner initializes the provisioner with the given name. - // It is an error to initialize the same provisioner more than once. - InitProvisioner(string) error - - // Provisioner gets the provisioner instance with the given name (already - // initialized) or returns nil if the provisioner isn't initialized. - Provisioner(string) provisioners.Interface + // Provisioner gets the provisioner instance with the given name. + Provisioner(string) (provisioners.Interface, error) // ProvisionerSchema retrieves the main configuration schema for a // particular provisioner, which must have already been initialized with // InitProvisioner. 
ProvisionerSchema(string) *configschema.Block - // CloseProvisioner closes provisioner connections that aren't needed - // anymore. - CloseProvisioner(string) error + // CloseProvisioner closes all provisioner plugins. + CloseProvisioners() error // EvaluateBlock takes the given raw configuration block and associated // schema and evaluates it to produce a value of an object type that diff --git a/terraform/eval_context_builtin.go b/terraform/eval_context_builtin.go index 67264948e..d477c00c1 100644 --- a/terraform/eval_context_builtin.go +++ b/terraform/eval_context_builtin.go @@ -107,7 +107,7 @@ func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error { case HookActionHalt: // Return an early exit error to trigger an early exit log.Printf("[WARN] Early exit triggered by hook: %T", h) - return EvalEarlyExitError{} + return nil } } @@ -228,48 +228,41 @@ func (ctx *BuiltinEvalContext) SetProviderInput(pc addrs.AbsProviderConfig, c ma ctx.ProviderLock.Unlock() } -func (ctx *BuiltinEvalContext) InitProvisioner(n string) error { - // If we already initialized, it is an error - if p := ctx.Provisioner(n); p != nil { - return fmt.Errorf("Provisioner '%s' already initialized", n) - } - - // Warning: make sure to acquire these locks AFTER the call to Provisioner - // above, since it also acquires locks. 
+func (ctx *BuiltinEvalContext) Provisioner(n string) (provisioners.Interface, error) { ctx.ProvisionerLock.Lock() defer ctx.ProvisionerLock.Unlock() - p, err := ctx.Components.ResourceProvisioner(n) - if err != nil { - return err + p, ok := ctx.ProvisionerCache[n] + if !ok { + var err error + p, err = ctx.Components.ResourceProvisioner(n) + if err != nil { + return nil, err + } + + ctx.ProvisionerCache[n] = p } - ctx.ProvisionerCache[n] = p - - return nil -} - -func (ctx *BuiltinEvalContext) Provisioner(n string) provisioners.Interface { - ctx.ProvisionerLock.Lock() - defer ctx.ProvisionerLock.Unlock() - - return ctx.ProvisionerCache[n] + return p, nil } func (ctx *BuiltinEvalContext) ProvisionerSchema(n string) *configschema.Block { return ctx.Schemas.ProvisionerConfig(n) } -func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { +func (ctx *BuiltinEvalContext) CloseProvisioners() error { + var diags tfdiags.Diagnostics ctx.ProvisionerLock.Lock() defer ctx.ProvisionerLock.Unlock() - prov := ctx.ProvisionerCache[n] - if prov != nil { - return prov.Close() + for name, prov := range ctx.ProvisionerCache { + err := prov.Close() + if err != nil { + diags = diags.Append(fmt.Errorf("provisioner.Close %s: %s", name, err)) + } } - return nil + return diags.Err() } func (ctx *BuiltinEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { @@ -297,7 +290,19 @@ func (ctx *BuiltinEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyData: keyData, Operation: ctx.Evaluator.Operation, } - return ctx.Evaluator.Scope(data, self) + scope := ctx.Evaluator.Scope(data, self) + + // ctx.PathValue is the path of the module that contains whatever + // expression the caller will be trying to evaluate, so this will + // activate only the experiments from that particular module, to + // be consistent with how experiment checking in the "configs" + 
// package itself works. The nil check here is for robustness in + // incompletely-mocked testing situations; mc should never be nil in + // real situations. + if mc := ctx.Evaluator.Config.DescendentForInstance(ctx.PathValue); mc != nil { + scope.SetActiveExperiments(mc.Module.ActiveExperiments) + } + return scope } func (ctx *BuiltinEvalContext) Path() addrs.ModuleInstance { @@ -320,7 +325,6 @@ func (ctx *BuiltinEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance args := ctx.VariableValues[key] if args == nil { - args = make(map[string]cty.Value) ctx.VariableValues[key] = vals return } diff --git a/terraform/eval_context_mock.go b/terraform/eval_context_mock.go index 11ae6941f..aa25f75f9 100644 --- a/terraform/eval_context_mock.go +++ b/terraform/eval_context_mock.go @@ -55,16 +55,14 @@ type MockEvalContext struct { SetProviderInputAddr addrs.AbsProviderConfig SetProviderInputValues map[string]cty.Value + ConfigureProviderFn func( + addr addrs.AbsProviderConfig, + cfg cty.Value) tfdiags.Diagnostics // overrides the other values below, if set ConfigureProviderCalled bool ConfigureProviderAddr addrs.AbsProviderConfig ConfigureProviderConfig cty.Value ConfigureProviderDiags tfdiags.Diagnostics - InitProvisionerCalled bool - InitProvisionerName string - InitProvisionerProvisioner provisioners.Interface - InitProvisionerError error - ProvisionerCalled bool ProvisionerName string ProvisionerProvisioner provisioners.Interface @@ -73,9 +71,7 @@ type MockEvalContext struct { ProvisionerSchemaName string ProvisionerSchemaSchema *configschema.Block - CloseProvisionerCalled bool - CloseProvisionerName string - CloseProvisionerProvisioner provisioners.Interface + CloseProvisionersCalled bool EvaluateBlockCalled bool EvaluateBlockBody hcl.Body @@ -183,9 +179,13 @@ func (c *MockEvalContext) CloseProvider(addr addrs.AbsProviderConfig) error { } func (c *MockEvalContext) ConfigureProvider(addr addrs.AbsProviderConfig, cfg cty.Value) tfdiags.Diagnostics { + 
c.ConfigureProviderCalled = true c.ConfigureProviderAddr = addr c.ConfigureProviderConfig = cfg + if c.ConfigureProviderFn != nil { + return c.ConfigureProviderFn(addr, cfg) + } return c.ConfigureProviderDiags } @@ -201,16 +201,10 @@ func (c *MockEvalContext) SetProviderInput(addr addrs.AbsProviderConfig, vals ma c.SetProviderInputValues = vals } -func (c *MockEvalContext) InitProvisioner(n string) error { - c.InitProvisionerCalled = true - c.InitProvisionerName = n - return c.InitProvisionerError -} - -func (c *MockEvalContext) Provisioner(n string) provisioners.Interface { +func (c *MockEvalContext) Provisioner(n string) (provisioners.Interface, error) { c.ProvisionerCalled = true c.ProvisionerName = n - return c.ProvisionerProvisioner + return c.ProvisionerProvisioner, nil } func (c *MockEvalContext) ProvisionerSchema(n string) *configschema.Block { @@ -219,9 +213,8 @@ func (c *MockEvalContext) ProvisionerSchema(n string) *configschema.Block { return c.ProvisionerSchemaSchema } -func (c *MockEvalContext) CloseProvisioner(n string) error { - c.CloseProvisionerCalled = true - c.CloseProvisionerName = n +func (c *MockEvalContext) CloseProvisioners() error { + c.CloseProvisionersCalled = true return nil } diff --git a/terraform/eval_count.go b/terraform/eval_count.go index 524707797..38a41b8d5 100644 --- a/terraform/eval_count.go +++ b/terraform/eval_count.go @@ -60,6 +60,10 @@ func evaluateCountExpressionValue(expr hcl.Expression, ctx EvalContext) (cty.Val return nullCount, diags } + // Unmark the count value, sensitive values are allowed in count but not for_each, + // as using it here will not disclose the sensitive value + countVal, _ = countVal.Unmark() + switch { case countVal.IsNull(): diags = diags.Append(&hcl.Diagnostic{ diff --git a/terraform/eval_count_test.go b/terraform/eval_count_test.go new file mode 100644 index 000000000..11e25400b --- /dev/null +++ b/terraform/eval_count_test.go @@ -0,0 +1,45 @@ +package terraform + +import ( + "reflect" + 
"testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcltest" + "github.com/zclconf/go-cty/cty" +) + +func TestEvaluateCountExpression(t *testing.T) { + tests := map[string]struct { + Expr hcl.Expression + Count int + }{ + "zero": { + hcltest.MockExprLiteral(cty.NumberIntVal(0)), + 0, + }, + "expression with marked value": { + hcltest.MockExprLiteral(cty.NumberIntVal(8).Mark("sensitive")), + 8, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ctx := &MockEvalContext{} + ctx.installSimpleEval() + countVal, diags := evaluateCountExpression(test.Expr, ctx) + + if len(diags) != 0 { + t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) + } + + if !reflect.DeepEqual(countVal, test.Count) { + t.Errorf( + "wrong map value\ngot: %swant: %s", + spew.Sdump(countVal), spew.Sdump(test.Count), + ) + } + }) + } +} diff --git a/terraform/eval_diff.go b/terraform/eval_diff.go deleted file mode 100644 index cc3a92175..000000000 --- a/terraform/eval_diff.go +++ /dev/null @@ -1,899 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "reflect" - "strings" - - "github.com/hashicorp/hcl/v2" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/plans/objchange" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalCheckPlannedChange is an EvalNode implementation that produces errors -// if the _actual_ expected value is not compatible with what was recorded -// in the plan. -// -// Errors here are most often indicative of a bug in the provider, so our -// error messages will report with that in mind. It's also possible that -// there's a bug in Terraform's Core's own "proposed new value" code in -// EvalDiff. 
-type EvalCheckPlannedChange struct { - Addr addrs.ResourceInstance - ProviderAddr addrs.AbsProviderConfig - ProviderSchema **ProviderSchema - - // We take ResourceInstanceChange objects here just because that's what's - // convenient to pass in from the evaltree implementation, but we really - // only look at the "After" value of each change. - Planned, Actual **plans.ResourceInstanceChange -} - -func (n *EvalCheckPlannedChange) Eval(ctx EvalContext) (interface{}, error) { - providerSchema := *n.ProviderSchema - plannedChange := *n.Planned - actualChange := *n.Actual - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support %q", n.Addr.Resource.Type) - } - - var diags tfdiags.Diagnostics - absAddr := n.Addr.Absolute(ctx.Path()) - - log.Printf("[TRACE] EvalCheckPlannedChange: Verifying that actual change (action %s) matches planned change (action %s)", actualChange.Action, plannedChange.Action) - - if plannedChange.Action != actualChange.Action { - switch { - case plannedChange.Action == plans.Update && actualChange.Action == plans.NoOp: - // It's okay for an update to become a NoOp once we've filled in - // all of the unknown values, since the final values might actually - // match what was there before after all. 
- log.Printf("[DEBUG] After incorporating new values learned so far during apply, %s change has become NoOp", absAddr) - - case (plannedChange.Action == plans.CreateThenDelete && actualChange.Action == plans.DeleteThenCreate) || - (plannedChange.Action == plans.DeleteThenCreate && actualChange.Action == plans.CreateThenDelete): - // If the order of replacement changed, then that is a bug in terraform - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Terraform produced inconsistent final plan", - fmt.Sprintf( - "When expanding the plan for %s to include new values learned so far during apply, the planned action changed from %s to %s.\n\nThis is a bug in Terraform and should be reported.", - absAddr, plannedChange.Action, actualChange.Action, - ), - )) - default: - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent final plan", - fmt.Sprintf( - "When expanding the plan for %s to include new values learned so far during apply, provider %q changed the planned action from %s to %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - absAddr, n.ProviderAddr.Provider.String(), - plannedChange.Action, actualChange.Action, - ), - )) - } - } - - errs := objchange.AssertObjectCompatible(schema, plannedChange.After, actualChange.After) - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced inconsistent final plan", - fmt.Sprintf( - "When expanding the plan for %s to include new values learned so far during apply, provider %q produced an invalid new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - absAddr, n.ProviderAddr.Provider.String(), tfdiags.FormatError(err), - ), - )) - } - return nil, diags.Err() -} - -// EvalDiff is an EvalNode implementation that detects changes for a given -// resource instance. 
-type EvalDiff struct { - Addr addrs.ResourceInstance - Config *configs.Resource - Provider *providers.Interface - ProviderAddr addrs.AbsProviderConfig - ProviderMetas map[addrs.Provider]*configs.ProviderMeta - ProviderSchema **ProviderSchema - State **states.ResourceInstanceObject - PreviousDiff **plans.ResourceInstanceChange - - // CreateBeforeDestroy is set if either the resource's own config sets - // create_before_destroy explicitly or if dependencies have forced the - // resource to be handled as create_before_destroy in order to avoid - // a dependency cycle. - CreateBeforeDestroy bool - - OutputChange **plans.ResourceInstanceChange - OutputState **states.ResourceInstanceObject - - Stub bool -} - -// TODO: test -func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - config := *n.Config - provider := *n.Provider - providerSchema := *n.ProviderSchema - - createBeforeDestroy := n.CreateBeforeDestroy - if n.PreviousDiff != nil { - // If we already planned the action, we stick to that plan - createBeforeDestroy = (*n.PreviousDiff).Action == plans.CreateThenDelete - } - - if providerSchema == nil { - return nil, fmt.Errorf("provider schema is unavailable for %s", n.Addr) - } - if n.ProviderAddr.Provider.Type == "" { - panic(fmt.Sprintf("EvalDiff for %s does not have ProviderAddr set", n.Addr.Absolute(ctx.Path()))) - } - - var diags tfdiags.Diagnostics - - // Evaluate the configuration - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - forEach, _ := evaluateForEachExpression(n.Config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) - origConfigVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if 
configDiags.HasErrors() { - return nil, diags.Err() - } - - metaConfigVal := cty.NullVal(cty.DynamicPseudoType) - if n.ProviderMetas != nil { - if m, ok := n.ProviderMetas[n.ProviderAddr.Provider]; ok && m != nil { - // if the provider doesn't support this feature, throw an error - if (*n.ProviderSchema).ProviderMeta == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Provider %s doesn't support provider_meta", n.ProviderAddr.Provider.String()), - Detail: fmt.Sprintf("The resource %s belongs to a provider that doesn't support provider_meta blocks", n.Addr), - Subject: &m.ProviderRange, - }) - } else { - var configDiags tfdiags.Diagnostics - metaConfigVal, _, configDiags = ctx.EvaluateBlock(m.Config, (*n.ProviderSchema).ProviderMeta, nil, EvalDataForNoInstanceKey) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - } - } - } - - absAddr := n.Addr.Absolute(ctx.Path()) - var priorVal cty.Value - var priorValTainted cty.Value - var priorPrivate []byte - if state != nil { - if state.Status != states.ObjectTainted { - priorVal = state.Value - priorPrivate = state.Private - } else { - // If the prior state is tainted then we'll proceed below like - // we're creating an entirely new object, but then turn it into - // a synthetic "Replace" change at the end, creating the same - // result as if the provider had marked at least one argument - // change as "requires replacement". - priorValTainted = state.Value - priorVal = cty.NullVal(schema.ImpliedType()) - } - } else { - priorVal = cty.NullVal(schema.ImpliedType()) - } - - // Create an unmarked version of our config val and our prior val. - // Store the paths for the config val to re-markafter - // we've sent things over the wire. 
- unmarkedConfigVal, unmarkedPaths := origConfigVal.UnmarkDeepWithPaths() - unmarkedPriorVal, priorPaths := priorVal.UnmarkDeepWithPaths() - - // ignore_changes is meant to only apply to the configuration, so it must - // be applied before we generate a plan. This ensures the config used for - // the proposed value, the proposed value itself, and the config presented - // to the provider in the PlanResourceChange request all agree on the - // starting values. - configValIgnored, ignoreChangeDiags := n.processIgnoreChanges(unmarkedPriorVal, unmarkedConfigVal) - diags = diags.Append(ignoreChangeDiags) - if ignoreChangeDiags.HasErrors() { - return nil, diags.Err() - } - - proposedNewVal := objchange.ProposedNewObject(schema, unmarkedPriorVal, configValIgnored) - - // Call pre-diff hook - if !n.Stub { - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal) - }) - if err != nil { - return nil, err - } - } - - log.Printf("[TRACE] Re-validating config for %q", n.Addr.Absolute(ctx.Path())) - // Allow the provider to validate the final set of values. - // The config was statically validated early on, but there may have been - // unknown values which the provider could not validate at the time. 
- validateResp := provider.ValidateResourceTypeConfig( - providers.ValidateResourceTypeConfigRequest{ - TypeName: n.Addr.Resource.Type, - Config: configValIgnored, - }, - ) - if validateResp.Diagnostics.HasErrors() { - return nil, validateResp.Diagnostics.InConfigBody(config.Config).Err() - } - - resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: n.Addr.Resource.Type, - Config: configValIgnored, - PriorState: unmarkedPriorVal, - ProposedNewState: proposedNewVal, - PriorPrivate: priorPrivate, - ProviderMeta: metaConfigVal, - }) - diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) - if diags.HasErrors() { - return nil, diags.Err() - } - - plannedNewVal := resp.PlannedState - plannedPrivate := resp.PlannedPrivate - - if plannedNewVal == cty.NilVal { - // Should never happen. Since real-world providers return via RPC a nil - // is always a bug in the client-side stub. This is more likely caused - // by an incompletely-configured mock provider in tests, though. - panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", absAddr.String())) - } - - // We allow the planned new value to disagree with configuration _values_ - // here, since that allows the provider to do special logic like a - // DiffSuppressFunc, but we still require that the provider produces - // a value whose type conforms to the schema. 
- for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.Provider.String(), tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - - if errs := objchange.AssertPlanValid(schema, unmarkedPriorVal, configValIgnored, plannedNewVal); len(errs) > 0 { - if resp.LegacyTypeSystem { - // The shimming of the old type system in the legacy SDK is not precise - // enough to pass this consistency check, so we'll give it a pass here, - // but we will generate a warning about it so that we are more likely - // to notice in the logs if an inconsistency beyond the type system - // leads to a downstream provider failure. - var buf strings.Builder - fmt.Fprintf(&buf, - "[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", - n.ProviderAddr.Provider.String(), absAddr, - ) - for _, err := range errs { - fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) - } - log.Print(buf.String()) - } else { - for _, err := range errs { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.Provider.String(), tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - return nil, diags.Err() - } - } - - // Add the marks back to the planned new value -- this must happen after ignore changes - // have been processed - unmarkedPlannedNewVal := plannedNewVal 
- if len(unmarkedPaths) > 0 { - plannedNewVal = plannedNewVal.MarkWithPaths(unmarkedPaths) - } - - // The provider produces a list of paths to attributes whose changes mean - // that we must replace rather than update an existing remote object. - // However, we only need to do that if the identified attributes _have_ - // actually changed -- particularly after we may have undone some of the - // changes in processIgnoreChanges -- so now we'll filter that list to - // include only where changes are detected. - reqRep := cty.NewPathSet() - if len(resp.RequiresReplace) > 0 { - for _, path := range resp.RequiresReplace { - if priorVal.IsNull() { - // If prior is null then we don't expect any RequiresReplace at all, - // because this is a Create action. - continue - } - - priorChangedVal, priorPathDiags := hcl.ApplyPath(unmarkedPriorVal, path, nil) - plannedChangedVal, plannedPathDiags := hcl.ApplyPath(plannedNewVal, path, nil) - if plannedPathDiags.HasErrors() && priorPathDiags.HasErrors() { - // This means the path was invalid in both the prior and new - // values, which is an error with the provider itself. - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q has indicated \"requires replacement\" on %s for a non-existent attribute path %#v.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.Provider.String(), absAddr, path, - ), - )) - continue - } - - // Make sure we have valid Values for both values. - // Note: if the opposing value was of the type - // cty.DynamicPseudoType, the type assigned here may not exactly - // match the schema. This is fine here, since we're only going to - // check for equality, but if the NullVal is to be used, we need to - // check the schema for th true type. 
- switch { - case priorChangedVal == cty.NilVal && plannedChangedVal == cty.NilVal: - // this should never happen without ApplyPath errors above - panic("requires replace path returned 2 nil values") - case priorChangedVal == cty.NilVal: - priorChangedVal = cty.NullVal(plannedChangedVal.Type()) - case plannedChangedVal == cty.NilVal: - plannedChangedVal = cty.NullVal(priorChangedVal.Type()) - } - - // Unmark for this value for the equality test. If only sensitivity has changed, - // this does not require an Update or Replace - unmarkedPlannedChangedVal, _ := plannedChangedVal.UnmarkDeep() - eqV := unmarkedPlannedChangedVal.Equals(priorChangedVal) - if !eqV.IsKnown() || eqV.False() { - reqRep.Add(path) - } - } - if diags.HasErrors() { - return nil, diags.Err() - } - } - - // Unmark for this test for value equality. - eqV := unmarkedPlannedNewVal.Equals(unmarkedPriorVal) - eq := eqV.IsKnown() && eqV.True() - - var action plans.Action - switch { - case priorVal.IsNull(): - action = plans.Create - case eq: - action = plans.NoOp - case !reqRep.Empty(): - // If there are any "requires replace" paths left _after our filtering - // above_ then this is a replace action. - if createBeforeDestroy { - action = plans.CreateThenDelete - } else { - action = plans.DeleteThenCreate - } - default: - action = plans.Update - // "Delete" is never chosen here, because deletion plans are always - // created more directly elsewhere, such as in "orphan" handling. - } - - if action.IsReplace() { - // In this strange situation we want to produce a change object that - // shows our real prior object but has a _new_ object that is built - // from a null prior object, since we're going to delete the one - // that has all the computed values on it. - // - // Therefore we'll ask the provider to plan again here, giving it - // a null object for the prior, and then we'll meld that with the - // _actual_ prior state to produce a correctly-shaped replace change. 
- // The resulting change should show any computed attributes changing - // from known prior values to unknown values, unless the provider is - // able to predict new values for any of these computed attributes. - nullPriorVal := cty.NullVal(schema.ImpliedType()) - - // Since there is no prior state to compare after replacement, we need - // a new unmarked config from our original with no ignored values. - unmarkedConfigVal := origConfigVal - if origConfigVal.ContainsMarked() { - unmarkedConfigVal, _ = origConfigVal.UnmarkDeep() - } - - // create a new proposed value from the null state and the config - proposedNewVal = objchange.ProposedNewObject(schema, nullPriorVal, unmarkedConfigVal) - - resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{ - TypeName: n.Addr.Resource.Type, - Config: unmarkedConfigVal, - PriorState: nullPriorVal, - ProposedNewState: proposedNewVal, - PriorPrivate: plannedPrivate, - ProviderMeta: metaConfigVal, - }) - // We need to tread carefully here, since if there are any warnings - // in here they probably also came out of our previous call to - // PlanResourceChange above, and so we don't want to repeat them. - // Consequently, we break from the usual pattern here and only - // append these new diagnostics if there's at least one error inside. 
- if resp.Diagnostics.HasErrors() { - diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) - return nil, diags.Err() - } - plannedNewVal = resp.PlannedState - plannedPrivate = resp.PlannedPrivate - - if len(unmarkedPaths) > 0 { - plannedNewVal = plannedNewVal.MarkWithPaths(unmarkedPaths) - } - - for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid plan", - fmt.Sprintf( - "Provider %q planned an invalid value for %s%s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.Provider.String(), absAddr, tfdiags.FormatError(err), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - } - - // If our prior value was tainted then we actually want this to appear - // as a replace change, even though so far we've been treating it as a - // create. - if action == plans.Create && priorValTainted != cty.NilVal { - if createBeforeDestroy { - action = plans.CreateThenDelete - } else { - action = plans.DeleteThenCreate - } - priorVal = priorValTainted - } - - // If we plan to write or delete sensitive paths from state, - // this is an Update action - if action == plans.NoOp && !reflect.DeepEqual(priorPaths, unmarkedPaths) { - action = plans.Update - } - - // As a special case, if we have a previous diff (presumably from the plan - // phases, whereas we're now in the apply phase) and it was for a replace, - // we've already deleted the original object from state by the time we - // get here and so we would've ended up with a _create_ action this time, - // which we now need to paper over to get a result consistent with what - // we originally intended. 
- if n.PreviousDiff != nil { - prevChange := *n.PreviousDiff - if prevChange.Action.IsReplace() && action == plans.Create { - log.Printf("[TRACE] EvalDiff: %s treating Create change as %s change to match with earlier plan", absAddr, prevChange.Action) - action = prevChange.Action - priorVal = prevChange.Before - } - } - - // Call post-refresh hook - if !n.Stub { - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(absAddr, states.CurrentGen, action, priorVal, plannedNewVal) - }) - if err != nil { - return nil, err - } - } - - // Update our output if we care - if n.OutputChange != nil { - *n.OutputChange = &plans.ResourceInstanceChange{ - Addr: absAddr, - Private: plannedPrivate, - ProviderAddr: n.ProviderAddr, - Change: plans.Change{ - Action: action, - Before: priorVal, - // Pass the marked planned value through in our change - // to propogate through evaluation. - // Marks will be removed when encoding. - After: plannedNewVal, - }, - RequiredReplace: reqRep, - } - } - - // Update the state if we care - if n.OutputState != nil { - *n.OutputState = &states.ResourceInstanceObject{ - // We use the special "planned" status here to note that this - // object's value is not yet complete. Objects with this status - // cannot be used during expression evaluation, so the caller - // must _also_ record the returned change in the active plan, - // which the expression evaluator will use in preference to this - // incomplete value recorded in the state. - Status: states.ObjectPlanned, - Value: plannedNewVal, - Private: plannedPrivate, - } - } - - return nil, nil -} - -func (n *EvalDiff) processIgnoreChanges(prior, config cty.Value) (cty.Value, tfdiags.Diagnostics) { - // ignore_changes only applies when an object already exists, since we - // can't ignore changes to a thing we've not created yet. 
- if prior.IsNull() { - return config, nil - } - - ignoreChanges := n.Config.Managed.IgnoreChanges - ignoreAll := n.Config.Managed.IgnoreAllChanges - - if len(ignoreChanges) == 0 && !ignoreAll { - return config, nil - } - if ignoreAll { - return prior, nil - } - if prior.IsNull() || config.IsNull() { - // Ignore changes doesn't apply when we're creating for the first time. - // Proposed should never be null here, but if it is then we'll just let it be. - return config, nil - } - - return processIgnoreChangesIndividual(prior, config, ignoreChanges) -} - -func processIgnoreChangesIndividual(prior, config cty.Value, ignoreChanges []hcl.Traversal) (cty.Value, tfdiags.Diagnostics) { - // When we walk below we will be using cty.Path values for comparison, so - // we'll convert our traversals here so we can compare more easily. - ignoreChangesPath := make([]cty.Path, len(ignoreChanges)) - for i, traversal := range ignoreChanges { - path := make(cty.Path, len(traversal)) - for si, step := range traversal { - switch ts := step.(type) { - case hcl.TraverseRoot: - path[si] = cty.GetAttrStep{ - Name: ts.Name, - } - case hcl.TraverseAttr: - path[si] = cty.GetAttrStep{ - Name: ts.Name, - } - case hcl.TraverseIndex: - path[si] = cty.IndexStep{ - Key: ts.Key, - } - default: - panic(fmt.Sprintf("unsupported traversal step %#v", step)) - } - } - ignoreChangesPath[i] = path - } - - type ignoreChange struct { - // Path is the full path, minus any trailing map index - path cty.Path - // Value is the value we are to retain at the above path. If there is a - // key value, this must be a map and the desired value will be at the - // key index. - value cty.Value - // Key is the index key if the ignored path ends in a map index. - key cty.Value - } - var ignoredValues []ignoreChange - - // Find the actual changes first and store them in the ignoreChange struct. 
- // If the change was to a map value, and the key doesn't exist in the - // config, it would never be visited in the transform walk. - for _, icPath := range ignoreChangesPath { - key := cty.NullVal(cty.String) - // check for a map index, since maps are the only structure where we - // could have invalid path steps. - last, ok := icPath[len(icPath)-1].(cty.IndexStep) - if ok { - if last.Key.Type() == cty.String { - icPath = icPath[:len(icPath)-1] - key = last.Key - } - } - - // The structure should have been validated already, and we already - // trimmed the trailing map index. Any other intermediate index error - // means we wouldn't be able to apply the value below, so no need to - // record this. - p, err := icPath.Apply(prior) - if err != nil { - continue - } - c, err := icPath.Apply(config) - if err != nil { - continue - } - - // If this is a map, it is checking the entire map value for equality - // rather than the individual key. This means that the change is stored - // here even if our ignored key doesn't change. That is OK since it - // won't cause any changes in the transformation, but allows us to skip - // breaking up the maps and checking for key existence here too. 
- eq := p.Equals(c) - if eq.IsKnown() && eq.False() { - // there a change to ignore at this path, store the prior value - ignoredValues = append(ignoredValues, ignoreChange{icPath, p, key}) - } - } - - if len(ignoredValues) == 0 { - return config, nil - } - - ret, _ := cty.Transform(config, func(path cty.Path, v cty.Value) (cty.Value, error) { - for _, ignored := range ignoredValues { - if !path.Equals(ignored.path) { - return v, nil - } - - // no index, so we can return the entire value - if ignored.key.IsNull() { - return ignored.value, nil - } - - // we have an index key, so make sure we have a map - if !v.Type().IsMapType() { - // we'll let other validation catch any type mismatch - return v, nil - } - - // Now we know we are ignoring a specific index of this map, so get - // the config map and modify, add, or remove the desired key. - var configMap map[string]cty.Value - var priorMap map[string]cty.Value - - if !v.IsNull() { - if !v.IsKnown() { - // if the entire map is not known, we can't ignore any - // specific keys yet. - continue - } - configMap = v.AsValueMap() - } - if configMap == nil { - configMap = map[string]cty.Value{} - } - - // We also need to create a prior map, so we can check for - // existence while getting the value. Value.Index will always - // return null. - if !ignored.value.IsNull() { - priorMap = ignored.value.AsValueMap() - } - if priorMap == nil { - priorMap = map[string]cty.Value{} - } - - key := ignored.key.AsString() - priorElem, keep := priorMap[key] - - switch { - case !keep: - // this didn't exist in the old map value, so we're keeping the - // "absence" of the key by removing it from the config - delete(configMap, key) - default: - configMap[key] = priorElem - } - - if len(configMap) == 0 { - return cty.MapValEmpty(v.Type().ElementType()), nil - } - - return cty.MapVal(configMap), nil - } - return v, nil - }) - return ret, nil -} - -// EvalDiffDestroy is an EvalNode implementation that returns a plain -// destroy diff. 
-type EvalDiffDestroy struct { - Addr addrs.ResourceInstance - DeposedKey states.DeposedKey - State **states.ResourceInstanceObject - ProviderAddr addrs.AbsProviderConfig - - Output **plans.ResourceInstanceChange - OutputState **states.ResourceInstanceObject -} - -// TODO: test -func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - state := *n.State - - if n.ProviderAddr.Provider.Type == "" { - if n.DeposedKey == "" { - panic(fmt.Sprintf("EvalDiffDestroy for %s does not have ProviderAddr set", absAddr)) - } else { - panic(fmt.Sprintf("EvalDiffDestroy for %s (deposed %s) does not have ProviderAddr set", absAddr, n.DeposedKey)) - } - } - - // If there is no state or our attributes object is null then we're already - // destroyed. - if state == nil || state.Value.IsNull() { - return nil, nil - } - - // Call pre-diff hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff( - absAddr, n.DeposedKey.Generation(), - state.Value, - cty.NullVal(cty.DynamicPseudoType), - ) - }) - if err != nil { - return nil, err - } - - // Change is always the same for a destroy. We don't need the provider's - // help for this one. - // TODO: Should we give the provider an opportunity to veto this? - change := &plans.ResourceInstanceChange{ - Addr: absAddr, - DeposedKey: n.DeposedKey, - Change: plans.Change{ - Action: plans.Delete, - Before: state.Value, - After: cty.NullVal(cty.DynamicPseudoType), - }, - Private: state.Private, - ProviderAddr: n.ProviderAddr, - } - - // Call post-diff hook - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff( - absAddr, - n.DeposedKey.Generation(), - change.Action, - change.Before, - change.After, - ) - }) - if err != nil { - return nil, err - } - - // Update our output - *n.Output = change - - if n.OutputState != nil { - // Record our proposed new state, which is nil because we're destroying. 
- *n.OutputState = nil - } - - return nil, nil -} - -// EvalReduceDiff is an EvalNode implementation that takes a planned resource -// instance change as might be produced by EvalDiff or EvalDiffDestroy and -// "simplifies" it to a single atomic action to be performed by a specific -// graph node. -// -// Callers must specify whether they are a destroy node or a regular apply -// node. If the result is NoOp then the given change requires no action for -// the specific graph node calling this and so evaluation of the that graph -// node should exit early and take no action. -// -// The object written to OutChange may either be identical to InChange or -// a new change object derived from InChange. Because of the former case, the -// caller must not mutate the object returned in OutChange. -type EvalReduceDiff struct { - Addr addrs.ResourceInstance - InChange **plans.ResourceInstanceChange - Destroy bool - OutChange **plans.ResourceInstanceChange -} - -// TODO: test -func (n *EvalReduceDiff) Eval(ctx EvalContext) (interface{}, error) { - in := *n.InChange - out := in.Simplify(n.Destroy) - if n.OutChange != nil { - *n.OutChange = out - } - if out.Action != in.Action { - if n.Destroy { - log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for destroy node", n.Addr, in.Action, out.Action) - } else { - log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for apply node", n.Addr, in.Action, out.Action) - } - } - return nil, nil -} - -// EvalWriteDiff is an EvalNode implementation that saves a planned change -// for an instance object into the set of global planned changes. 
-type EvalWriteDiff struct { - Addr addrs.ResourceInstance - DeposedKey states.DeposedKey - ProviderSchema **ProviderSchema - Change **plans.ResourceInstanceChange -} - -// TODO: test -func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) { - changes := ctx.Changes() - addr := n.Addr.Absolute(ctx.Path()) - if n.Change == nil || *n.Change == nil { - // Caller sets nil to indicate that we need to remove a change from - // the set of changes. - gen := states.CurrentGen - if n.DeposedKey != states.NotDeposed { - gen = n.DeposedKey - } - changes.RemoveResourceInstanceChange(addr, gen) - return nil, nil - } - - providerSchema := *n.ProviderSchema - change := *n.Change - - if change.Addr.String() != addr.String() || change.DeposedKey != n.DeposedKey { - // Should never happen, and indicates a bug in the caller. - panic("inconsistent address and/or deposed key in EvalWriteDiff") - } - - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - csrc, err := change.Encode(schema.ImpliedType()) - if err != nil { - return nil, fmt.Errorf("failed to encode planned changes for %s: %s", addr, err) - } - - changes.AppendResourceInstanceChange(csrc) - if n.DeposedKey == states.NotDeposed { - log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s", change.Action, addr) - } else { - log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s deposed object %s", change.Action, addr, n.DeposedKey) - } - - return nil, nil -} diff --git a/terraform/eval_error.go b/terraform/eval_error.go deleted file mode 100644 index 853ea2cc8..000000000 --- a/terraform/eval_error.go +++ /dev/null @@ -1,7 +0,0 @@ -package terraform - -// EvalEarlyExitError is a special error return value that can be returned -// by eval nodes that does an early exit. 
-type EvalEarlyExitError struct{} - -func (EvalEarlyExitError) Error() string { return "early exit" } diff --git a/terraform/eval_for_each.go b/terraform/eval_for_each.go index 75e6eabf1..d2be0a2c4 100644 --- a/terraform/eval_for_each.go +++ b/terraform/eval_for_each.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform/lang" "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) @@ -17,18 +18,11 @@ import ( // returning an error if the count value is not known, and converting the // cty.Value to a map[string]cty.Value for compatibility with other calls. func evaluateForEachExpression(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, diags tfdiags.Diagnostics) { - forEachVal, diags := evaluateForEachExpressionValue(expr, ctx) - if !forEachVal.IsKnown() { - // Attach a diag as we do with count, with the same downsides - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: `The "for_each" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the for_each depends on.`, - Subject: expr.Range().Ptr(), - }) - } + forEachVal, diags := evaluateForEachExpressionValue(expr, ctx, false) + // forEachVal might be unknown, but if it is then there should already + // be an error about it in diags, which we'll return below. 
- if forEachVal.IsNull() || !forEachVal.IsKnown() || forEachVal.LengthInt() == 0 { + if forEachVal.IsNull() || !forEachVal.IsKnown() || markSafeLengthInt(forEachVal) == 0 { // we check length, because an empty set return a nil map return map[string]cty.Value{}, diags } @@ -38,7 +32,7 @@ func evaluateForEachExpression(expr hcl.Expression, ctx EvalContext) (forEach ma // evaluateForEachExpressionValue is like evaluateForEachExpression // except that it returns a cty.Value map or set which can be unknown. -func evaluateForEachExpressionValue(expr hcl.Expression, ctx EvalContext) (cty.Value, tfdiags.Diagnostics) { +func evaluateForEachExpressionValue(expr hcl.Expression, ctx EvalContext, allowUnknown bool) (cty.Value, tfdiags.Diagnostics) { var diags tfdiags.Diagnostics nullMap := cty.NullVal(cty.Map(cty.DynamicPseudoType)) @@ -46,16 +40,38 @@ func evaluateForEachExpressionValue(expr hcl.Expression, ctx EvalContext) (cty.V return nullMap, diags } - forEachVal, forEachDiags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil) + refs, moreDiags := lang.ReferencesInExpr(expr) + diags = diags.Append(moreDiags) + scope := ctx.EvaluationScope(nil, EvalDataForNoInstanceKey) + var hclCtx *hcl.EvalContext + if scope != nil { + hclCtx, moreDiags = scope.EvalContext(refs) + } else { + // This shouldn't happen in real code, but it can unfortunately arise + // in unit tests due to incompletely-implemented mocks. 
:( + hclCtx = &hcl.EvalContext{} + } + diags = diags.Append(moreDiags) + if diags.HasErrors() { // Can't continue if we don't even have a valid scope + return nullMap, diags + } + + forEachVal, forEachDiags := expr.Value(hclCtx) diags = diags.Append(forEachDiags) - if forEachVal.ContainsMarked() { + + // If a whole map is marked, or a set contains marked values (which means the set is then marked) + // give an error diagnostic as this value cannot be used in for_each + if forEachVal.IsMarked() { diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: "Sensitive variable, or values derived from sensitive variables, cannot be used as for_each arguments. If used, the sensitive value could be exposed as a resource instance key.", - Subject: expr.Range().Ptr(), + Severity: hcl.DiagError, + Summary: "Invalid for_each argument", + Detail: "Sensitive values, or values derived from sensitive values, cannot be used as for_each arguments. If used, the sensitive value could be exposed as a resource instance key.", + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: hclCtx, }) } + if diags.HasErrors() { return nullMap, diags } @@ -64,26 +80,40 @@ func evaluateForEachExpressionValue(expr hcl.Expression, ctx EvalContext) (cty.V switch { case forEachVal.IsNull(): diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: `The given "for_each" argument value is unsuitable: the given "for_each" argument value is null. A map, or set of strings is allowed.`, - Subject: expr.Range().Ptr(), + Severity: hcl.DiagError, + Summary: "Invalid for_each argument", + Detail: `The given "for_each" argument value is unsuitable: the given "for_each" argument value is null. 
A map, or set of strings is allowed.`, + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: hclCtx, }) return nullMap, diags case !forEachVal.IsKnown(): + if !allowUnknown { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each argument", + Detail: errInvalidForEachUnknownDetail, + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: hclCtx, + }) + } // ensure that we have a map, and not a DynamicValue return cty.UnknownVal(cty.Map(cty.DynamicPseudoType)), diags case !(ty.IsMapType() || ty.IsSetType() || ty.IsObjectType()): diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each argument", - Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: the "for_each" argument must be a map, or set of strings, and you have provided a value of type %s.`, ty.FriendlyName()), - Subject: expr.Range().Ptr(), + Severity: hcl.DiagError, + Summary: "Invalid for_each argument", + Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: the "for_each" argument must be a map, or set of strings, and you have provided a value of type %s.`, ty.FriendlyName()), + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: hclCtx, }) return nullMap, diags - case forEachVal.LengthInt() == 0: + case markSafeLengthInt(forEachVal) == 0: // If the map is empty ({}), return an empty map, because cty will // return nil when representing {} AsValueMap. 
This also covers an empty // set (toset([])) @@ -94,15 +124,27 @@ func evaluateForEachExpressionValue(expr hcl.Expression, ctx EvalContext) (cty.V // since we can't use a set values that are unknown, we treat the // entire set as unknown if !forEachVal.IsWhollyKnown() { + if !allowUnknown { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each argument", + Detail: errInvalidForEachUnknownDetail, + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: hclCtx, + }) + } return cty.UnknownVal(ty), diags } if ty.ElementType() != cty.String { diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each set argument", - Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: "for_each" supports maps and sets of strings, but you have provided a set containing type %s.`, forEachVal.Type().ElementType().FriendlyName()), - Subject: expr.Range().Ptr(), + Severity: hcl.DiagError, + Summary: "Invalid for_each set argument", + Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: "for_each" supports maps and sets of strings, but you have provided a set containing type %s.`, forEachVal.Type().ElementType().FriendlyName()), + Subject: expr.Range().Ptr(), + Expression: expr, + EvalContext: hclCtx, }) return cty.NullVal(ty), diags } @@ -114,10 +156,12 @@ func evaluateForEachExpressionValue(expr hcl.Expression, ctx EvalContext) (cty.V item, _ := it.Element() if item.IsNull() { diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid for_each set argument", - Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: "for_each" sets must not contain null values.`), - Subject: expr.Range().Ptr(), + Severity: hcl.DiagError, + Summary: "Invalid for_each set argument", + Detail: `The given "for_each" argument value is unsuitable: "for_each" sets must not contain null values.`, + Subject: expr.Range().Ptr(), + Expression: expr, + 
EvalContext: hclCtx, }) return cty.NullVal(ty), diags } @@ -126,3 +170,11 @@ func evaluateForEachExpressionValue(expr hcl.Expression, ctx EvalContext) (cty.V return forEachVal, nil } + +const errInvalidForEachUnknownDetail = `The "for_each" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the for_each depends on.` + +// markSafeLengthInt allows calling LengthInt on marked values safely +func markSafeLengthInt(val cty.Value) int { + v, _ := val.UnmarkDeep() + return v.LengthInt() +} diff --git a/terraform/eval_for_each_test.go b/terraform/eval_for_each_test.go index c7920a694..2f7156637 100644 --- a/terraform/eval_for_each_test.go +++ b/terraform/eval_for_each_test.go @@ -52,6 +52,16 @@ func TestEvaluateForEachExpression_valid(t *testing.T) { "b": cty.UnknownVal(cty.Bool), }, }, + "map containing sensitive values, but strings are literal": { + hcltest.MockExprLiteral(cty.MapVal(map[string]cty.Value{ + "a": cty.BoolVal(true).Mark("sensitive"), + "b": cty.BoolVal(false), + })), + map[string]cty.Value{ + "a": cty.BoolVal(true).Mark("sensitive"), + "b": cty.BoolVal(false), + }, + }, } for name, test := range tests { @@ -110,6 +120,14 @@ func TestEvaluateForEachExpression_errors(t *testing.T) { "Invalid for_each argument", "depends on resource attributes that cannot be determined until apply", }, + "marked map": { + hcltest.MockExprLiteral(cty.MapVal(map[string]cty.Value{ + "a": cty.BoolVal(true), + "b": cty.BoolVal(false), + }).Mark("sensitive")), + "Invalid for_each argument", + "Sensitive values, or values derived from sensitive values, cannot be used as for_each arguments. 
If used, the sensitive value could be exposed as a resource instance key.", + }, "set containing booleans": { hcltest.MockExprLiteral(cty.SetVal([]cty.Value{cty.BoolVal(true)})), "Invalid for_each set argument", @@ -130,6 +148,11 @@ func TestEvaluateForEachExpression_errors(t *testing.T) { "Invalid for_each argument", "depends on resource attributes that cannot be determined until apply", }, + "set containing marked values": { + hcltest.MockExprLiteral(cty.SetVal([]cty.Value{cty.StringVal("beep").Mark("sensitive"), cty.StringVal("boop")})), + "Invalid for_each argument", + "Sensitive values, or values derived from sensitive values, cannot be used as for_each arguments. If used, the sensitive value could be exposed as a resource instance key.", + }, } for name, test := range tests { @@ -150,6 +173,16 @@ func TestEvaluateForEachExpression_errors(t *testing.T) { if got, want := diags[0].Description().Detail, test.DetailSubstring; !strings.Contains(got, want) { t.Errorf("wrong diagnostic detail %#v; want %#v", got, want) } + if fromExpr := diags[0].FromExpr(); fromExpr != nil { + if fromExpr.Expression == nil { + t.Errorf("diagnostic does not refer to an expression") + } + if fromExpr.EvalContext == nil { + t.Errorf("diagnostic does not refer to an EvalContext") + } + } else { + t.Errorf("diagnostic does not support FromExpr\ngot: %s", spew.Sdump(diags[0])) + } }) } } @@ -164,7 +197,7 @@ func TestEvaluateForEachExpressionKnown(t *testing.T) { t.Run(name, func(t *testing.T) { ctx := &MockEvalContext{} ctx.installSimpleEval() - forEachVal, diags := evaluateForEachExpressionValue(expr, ctx) + forEachVal, diags := evaluateForEachExpressionValue(expr, ctx, true) if len(diags) != 0 { t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) diff --git a/terraform/eval_provider.go b/terraform/eval_provider.go index 0af5964ea..9b730377f 100644 --- a/terraform/eval_provider.go +++ b/terraform/eval_provider.go @@ -45,11 +45,11 @@ func buildProviderConfig(ctx EvalContext, addr 
addrs.AbsProviderConfig, config * } } -// GetProvider returns the providers.Interface and schema for a given provider. -func GetProvider(ctx EvalContext, addr addrs.AbsProviderConfig) (providers.Interface, *ProviderSchema, error) { +// getProvider returns the providers.Interface and schema for a given provider. +func getProvider(ctx EvalContext, addr addrs.AbsProviderConfig) (providers.Interface, *ProviderSchema, error) { if addr.Provider.Type == "" { // Should never happen - panic("EvalGetProvider used with uninitialized provider configuration address") + panic("GetProvider used with uninitialized provider configuration address") } provider := ctx.Provider(addr) if provider == nil { diff --git a/terraform/eval_read_data.go b/terraform/eval_read_data.go deleted file mode 100644 index 33eac066d..000000000 --- a/terraform/eval_read_data.go +++ /dev/null @@ -1,195 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// evalReadData implements shared methods and data for the individual data -// source eval nodes. -type evalReadData struct { - Addr addrs.ResourceInstance - Config *configs.Resource - Provider *providers.Interface - ProviderAddr addrs.AbsProviderConfig - ProviderMetas map[addrs.Provider]*configs.ProviderMeta - ProviderSchema **ProviderSchema - - // Planned is set when dealing with data resources that were deferred to - // the apply walk, to let us see what was planned. If this is set, the - // evaluation of the config is required to produce a wholly-known - // configuration which is consistent with the partial object included - // in this planned change. 
- Planned **plans.ResourceInstanceChange - - // State is the current state for the data source, and is updated once the - // new state has been read. - // While data sources are read-only, we need to start with the prior state - // to determine if we have a change or not. If we needed to read a new - // value, but it still matches the previous state, then we can record a - // NoNop change. If the states don't match then we record a Read change so - // that the new value is applied to the state. - State **states.ResourceInstanceObject - - // Output change records any change for this data source, which is - // interpreted differently than changes for managed resources. - // - During Refresh, this change is only used to correctly evaluate - // references to the data source, but it is not saved. - // - If a planned change has the action of plans.Read, it indicates that the - // data source could not be evaluated yet, and reading is being deferred to - // apply. - // - If planned action is plans.Update, it indicates that the data source - // was read, and the result needs to be stored in state during apply. - OutputChange **plans.ResourceInstanceChange - - // dependsOn stores the list of transitive resource addresses that any - // configuration depends_on references may resolve to. This is used to - // determine if there are any changes that will force this data sources to - // be deferred to apply. - dependsOn []addrs.ConfigResource - // forceDependsOn indicates that resources may be missing from dependsOn, - // but the parent module may have depends_on configured. - forceDependsOn bool -} - -// readDataSource handles everything needed to call ReadDataSource on the provider. -// A previously evaluated configVal can be passed in, or a new one is generated -// from the resource configuration. 
-func (n *evalReadData) readDataSource(ctx EvalContext, configVal cty.Value) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - var newVal cty.Value - - config := *n.Config - absAddr := n.Addr.Absolute(ctx.Path()) - - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - diags = diags.Append(fmt.Errorf("provider schema not available for %s", n.Addr)) - return newVal, diags - } - - provider := *n.Provider - - providerSchema := *n.ProviderSchema - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - diags = diags.Append(fmt.Errorf("provider %q does not support data source %q", n.ProviderAddr.Provider.String(), n.Addr.Resource.Type)) - return newVal, diags - } - - metaConfigVal, metaDiags := n.providerMetas(ctx) - diags = diags.Append(metaDiags) - if diags.HasErrors() { - return newVal, diags - } - - log.Printf("[TRACE] EvalReadData: Re-validating config for %s", absAddr) - validateResp := provider.ValidateDataSourceConfig( - providers.ValidateDataSourceConfigRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - }, - ) - if validateResp.Diagnostics.HasErrors() { - return newVal, validateResp.Diagnostics.InConfigBody(config.Config) - } - - // If we get down here then our configuration is complete and we're read - // to actually call the provider to read the data. - log.Printf("[TRACE] EvalReadData: %s configuration is complete, so reading from provider", absAddr) - - resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ - TypeName: n.Addr.Resource.Type, - Config: configVal, - ProviderMeta: metaConfigVal, - }) - diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) - if diags.HasErrors() { - return newVal, diags - } - newVal = resp.State - if newVal == cty.NilVal { - // This can happen with incompletely-configured mocks. 
We'll allow it - // and treat it as an alias for a properly-typed null value. - newVal = cty.NullVal(schema.ImpliedType()) - } - - for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.Provider.String(), tfdiags.FormatErrorPrefixed(err, absAddr.String()), - ), - )) - } - if diags.HasErrors() { - return newVal, diags - } - - if newVal.IsNull() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced null object", - fmt.Sprintf( - "Provider %q produced a null value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.Provider.String(), absAddr, - ), - )) - } - - if !newVal.IsNull() && !newVal.IsWhollyKnown() { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q produced a value for %s that is not wholly known.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.Provider.String(), absAddr, - ), - )) - - // We'll still save the object, but we need to eliminate any unknown - // values first because we can't serialize them in the state file. - // Note that this may cause set elements to be coalesced if they - // differed only by having unknown values, but we don't worry about - // that here because we're saving the value only for inspection - // purposes; the error we added above will halt the graph walk. 
- newVal = cty.UnknownAsNull(newVal) - } - - return newVal, diags -} - -func (n *evalReadData) providerMetas(ctx EvalContext) (cty.Value, tfdiags.Diagnostics) { - var diags tfdiags.Diagnostics - metaConfigVal := cty.NullVal(cty.DynamicPseudoType) - if n.ProviderMetas != nil { - if m, ok := n.ProviderMetas[n.ProviderAddr.Provider]; ok && m != nil { - // if the provider doesn't support this feature, throw an error - if (*n.ProviderSchema).ProviderMeta == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Provider %s doesn't support provider_meta", n.ProviderAddr.Provider.String()), - Detail: fmt.Sprintf("The resource %s belongs to a provider that doesn't support provider_meta blocks", n.Addr), - Subject: &m.ProviderRange, - }) - } else { - var configDiags tfdiags.Diagnostics - metaConfigVal, _, configDiags = ctx.EvaluateBlock(m.Config, (*n.ProviderSchema).ProviderMeta, nil, EvalDataForNoInstanceKey) - diags = diags.Append(configDiags) - } - } - } - return metaConfigVal, diags -} diff --git a/terraform/eval_read_data_apply.go b/terraform/eval_read_data_apply.go deleted file mode 100644 index 179fe368d..000000000 --- a/terraform/eval_read_data_apply.go +++ /dev/null @@ -1,82 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// evalReadDataApply is an EvalNode implementation that deals with the main part -// of the data resource lifecycle: either actually reading from the data source -// or generating a plan to do so. 
-type evalReadDataApply struct { - evalReadData -} - -func (n *evalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - - var diags tfdiags.Diagnostics - - var planned *plans.ResourceInstanceChange - if n.Planned != nil { - planned = *n.Planned - } - - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - return nil, fmt.Errorf("provider schema not available for %s", n.Addr) - } - - if planned != nil && planned.Action != plans.Read { - // If any other action gets in here then that's always a bug; this - // EvalNode only deals with reading. - return nil, fmt.Errorf( - "invalid action %s for %s: only Read is supported (this is a bug in Terraform; please report it!)", - planned.Action, absAddr, - ) - } - - if err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreApply(absAddr, states.CurrentGen, planned.Action, planned.Before, planned.After) - }); err != nil { - return nil, err - } - - config := *n.Config - providerSchema := *n.ProviderSchema - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider %q does not support data source %q", n.ProviderAddr.Provider.String(), n.Addr.Resource.Type) - } - - forEach, _ := evaluateForEachExpression(config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) - - configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.ErrWithWarnings() - } - - newVal, readDiags := n.readDataSource(ctx, configVal) - diags = diags.Append(readDiags) - if diags.HasErrors() { - return nil, diags.ErrWithWarnings() - } - - *n.State = &states.ResourceInstanceObject{ - Value: newVal, - Status: states.ObjectReady, - } - - if err := ctx.Hook(func(h Hook) (HookAction, error) { - return 
h.PostApply(absAddr, states.CurrentGen, newVal, diags.Err()) - }); err != nil { - diags = diags.Append(err) - } - - return nil, diags.ErrWithWarnings() -} diff --git a/terraform/eval_read_data_plan.go b/terraform/eval_read_data_plan.go deleted file mode 100644 index 226594299..000000000 --- a/terraform/eval_read_data_plan.go +++ /dev/null @@ -1,173 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/plans/objchange" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// evalReadDataPlan is an EvalNode implementation that deals with the main part -// of the data resource lifecycle: either actually reading from the data source -// or generating a plan to do so. -type evalReadDataPlan struct { - evalReadData -} - -func (n *evalReadDataPlan) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - - var diags tfdiags.Diagnostics - var configVal cty.Value - - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - return nil, fmt.Errorf("provider schema not available for %s", n.Addr) - } - - config := *n.Config - providerSchema := *n.ProviderSchema - schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider %q does not support data source %q", n.ProviderAddr.Provider.String(), n.Addr.Resource.Type) - } - - objTy := schema.ImpliedType() - priorVal := cty.NullVal(objTy) - if n.State != nil && *n.State != nil { - priorVal = (*n.State).Value - } - - forEach, _ := evaluateForEachExpression(config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) - - var configDiags tfdiags.Diagnostics - configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, 
nil, keyData) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.ErrWithWarnings() - } - - configKnown := configVal.IsWhollyKnown() - // If our configuration contains any unknown values, or we depend on any - // unknown values then we must defer the read to the apply phase by - // producing a "Read" change for this resource, and a placeholder value for - // it in the state. - if n.forcePlanRead(ctx) || !configKnown { - if configKnown { - log.Printf("[TRACE] evalReadDataPlan: %s configuration is fully known, but we're forcing a read plan to be created", absAddr) - } else { - log.Printf("[TRACE] evalReadDataPlan: %s configuration not fully known yet, so deferring to apply phase", absAddr) - } - - proposedNewVal := objchange.PlannedDataResourceObject(schema, configVal) - - if err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal) - }); err != nil { - diags = diags.Append(err) - return nil, diags.ErrWithWarnings() - } - - // Apply detects that the data source will need to be read by the After - // value containing unknowns from PlanDataResourceObject. - *n.OutputChange = &plans.ResourceInstanceChange{ - Addr: absAddr, - ProviderAddr: n.ProviderAddr, - Change: plans.Change{ - Action: plans.Read, - Before: priorVal, - After: proposedNewVal, - }, - } - - *n.State = &states.ResourceInstanceObject{ - Value: proposedNewVal, - Status: states.ObjectPlanned, - } - - if err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(absAddr, states.CurrentGen, plans.Read, priorVal, proposedNewVal) - }); err != nil { - diags = diags.Append(err) - } - - return nil, diags.ErrWithWarnings() - } - - // We have a complete configuration with no dependencies to wait on, so we - // can read the data source into the state. 
- newVal, readDiags := n.readDataSource(ctx, configVal) - diags = diags.Append(readDiags) - if diags.HasErrors() { - return nil, diags.ErrWithWarnings() - } - - // if we have a prior value, we can check for any irregularities in the response - if !priorVal.IsNull() { - // While we don't propose planned changes for data sources, we can - // generate a proposed value for comparison to ensure the data source - // is returning a result following the rules of the provider contract. - proposedVal := objchange.ProposedNewObject(schema, priorVal, configVal) - if errs := objchange.AssertObjectCompatible(schema, proposedVal, newVal); len(errs) > 0 { - // Resources have the LegacyTypeSystem field to signal when they are - // using an SDK which may not produce precise values. While data - // sources are read-only, they can still return a value which is not - // compatible with the config+schema. Since we can't detect the legacy - // type system, we can only warn about this for now. - var buf strings.Builder - fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s.", - n.ProviderAddr.Provider.String(), absAddr) - for _, err := range errs { - fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) - } - log.Print(buf.String()) - } - } - - *n.State = &states.ResourceInstanceObject{ - Value: newVal, - Status: states.ObjectReady, - } - - if err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(absAddr, states.CurrentGen, plans.Update, priorVal, newVal) - }); err != nil { - return nil, err - } - - return nil, diags.ErrWithWarnings() -} - -// forcePlanRead determines if we need to override the usual behavior of -// immediately reading from the data source where possible, instead forcing us -// to generate a plan. -func (n *evalReadDataPlan) forcePlanRead(ctx EvalContext) bool { - // Check and see if any depends_on dependencies have - // changes, since they won't show up as changes in the - // configuration. 
- changes := ctx.Changes() - for _, d := range n.dependsOn { - if d.Resource.Mode == addrs.DataResourceMode { - // Data sources have no external side effects, so they pose a need - // to delay this read. If they do have a change planned, it must be - // because of a dependency on a managed resource, in which case - // we'll also encounter it in this list of dependencies. - continue - } - - for _, change := range changes.GetChangesForConfigResource(d) { - if change != nil && change.Action != plans.NoOp { - return true - } - } - } - return false -} diff --git a/terraform/eval_refresh.go b/terraform/eval_refresh.go deleted file mode 100644 index 737484c45..000000000 --- a/terraform/eval_refresh.go +++ /dev/null @@ -1,162 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/plans/objchange" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -// EvalRefresh is an EvalNode implementation that does a refresh for -// a resource. 
-type EvalRefresh struct { - Addr addrs.ResourceInstance - ProviderAddr addrs.AbsProviderConfig - Provider *providers.Interface - ProviderMetas map[addrs.Provider]*configs.ProviderMeta - ProviderSchema **ProviderSchema - State **states.ResourceInstanceObject - Output **states.ResourceInstanceObject -} - -// TODO: test -func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - absAddr := n.Addr.Absolute(ctx.Path()) - - var diags tfdiags.Diagnostics - - // If we have no state, we don't do any refreshing - if state == nil { - log.Printf("[DEBUG] refresh: %s: no state, so not refreshing", n.Addr.Absolute(ctx.Path())) - return nil, diags.ErrWithWarnings() - } - - schema, _ := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Should be caught during validation, so we don't bother with a pretty error here - return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) - } - - metaConfigVal := cty.NullVal(cty.DynamicPseudoType) - if n.ProviderMetas != nil { - if m, ok := n.ProviderMetas[n.ProviderAddr.Provider]; ok && m != nil { - log.Printf("[DEBUG] EvalRefresh: ProviderMeta config value set") - // if the provider doesn't support this feature, throw an error - if (*n.ProviderSchema).ProviderMeta == nil { - log.Printf("[DEBUG] EvalRefresh: no ProviderMeta schema") - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Provider %s doesn't support provider_meta", n.ProviderAddr.Provider.String()), - Detail: fmt.Sprintf("The resource %s belongs to a provider that doesn't support provider_meta blocks", n.Addr), - Subject: &m.ProviderRange, - }) - } else { - log.Printf("[DEBUG] EvalRefresh: ProviderMeta schema found: %+v", (*n.ProviderSchema).ProviderMeta) - var configDiags tfdiags.Diagnostics - metaConfigVal, _, configDiags = ctx.EvaluateBlock(m.Config, (*n.ProviderSchema).ProviderMeta, nil, EvalDataForNoInstanceKey) - diags = 
diags.Append(configDiags) - if configDiags.HasErrors() { - return nil, diags.Err() - } - } - } - } - - // Call pre-refresh hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreRefresh(absAddr, states.CurrentGen, state.Value) - }) - if err != nil { - return nil, diags.ErrWithWarnings() - } - - // Refresh! - priorVal := state.Value - - // Unmarked before sending to provider - var priorPaths []cty.PathValueMarks - if priorVal.ContainsMarked() { - priorVal, priorPaths = priorVal.UnmarkDeepWithPaths() - } - - req := providers.ReadResourceRequest{ - TypeName: n.Addr.Resource.Type, - PriorState: priorVal, - Private: state.Private, - ProviderMeta: metaConfigVal, - } - - provider := *n.Provider - resp := provider.ReadResource(req) - diags = diags.Append(resp.Diagnostics) - if diags.HasErrors() { - return nil, diags.Err() - } - - if resp.NewState == cty.NilVal { - // This ought not to happen in real cases since it's not possible to - // send NilVal over the plugin RPC channel, but it can come up in - // tests due to sloppy mocking. - panic("new state is cty.NilVal") - } - - for _, err := range resp.NewState.Type().TestConformance(schema.ImpliedType()) { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Provider produced invalid object", - fmt.Sprintf( - "Provider %q planned an invalid value for %s during refresh: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", - n.ProviderAddr.Provider.String(), absAddr, tfdiags.FormatError(err), - ), - )) - } - if diags.HasErrors() { - return nil, diags.Err() - } - - // We have no way to exempt provider using the legacy SDK from this check, - // so we can only log inconsistencies with the updated state values. 
- if errs := objchange.AssertObjectCompatible(schema, priorVal, resp.NewState); len(errs) > 0 { - var buf strings.Builder - fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s during refresh.", n.ProviderAddr.Provider.String(), absAddr) - for _, err := range errs { - fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) - } - log.Print(buf.String()) - } - - newState := state.DeepCopy() - newState.Value = resp.NewState - newState.Private = resp.Private - newState.Dependencies = state.Dependencies - newState.CreateBeforeDestroy = state.CreateBeforeDestroy - - // Call post-refresh hook - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(absAddr, states.CurrentGen, priorVal, newState.Value) - }) - if err != nil { - return nil, err - } - - // Mark the value if necessary - if len(priorPaths) > 0 { - newState.Value = newState.Value.MarkWithPaths(priorPaths) - } - - if n.Output != nil { - *n.Output = newState - } - - return nil, diags.ErrWithWarnings() -} diff --git a/terraform/eval_state.go b/terraform/eval_state.go deleted file mode 100644 index 1edab4516..000000000 --- a/terraform/eval_state.go +++ /dev/null @@ -1,481 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/plans" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/tfdiags" -) - -type phaseState int - -const ( - workingState phaseState = iota - refreshState -) - -// EvalReadState is an EvalNode implementation that reads the -// current object for a specific instance in the state. -type EvalReadState struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // ProviderSchema is the schema for the provider given in Provider. 
- ProviderSchema **ProviderSchema - - // Provider is the provider that will subsequently perform actions on - // the the state object. This is used to perform any schema upgrades - // that might be required to prepare the stored data for use. - Provider *providers.Interface - - // Output will be written with a pointer to the retrieved object. - Output **states.ResourceInstanceObject -} - -func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) { - if n.Provider == nil || *n.Provider == nil { - panic("EvalReadState used with no Provider object") - } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - panic("EvalReadState used with no ProviderSchema object") - } - - absAddr := n.Addr.Absolute(ctx.Path()) - log.Printf("[TRACE] EvalReadState: reading state for %s", absAddr) - - src := ctx.State().ResourceInstanceObject(absAddr, states.CurrentGen) - if src == nil { - // Presumably we only have deposed objects, then. - log.Printf("[TRACE] EvalReadState: no state present for %s", absAddr) - return nil, nil - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Shouldn't happen since we should've failed long ago if no schema is present - return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) - } - var diags tfdiags.Diagnostics - src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) - if diags.HasErrors() { - // Note that we don't have any channel to return warnings here. We'll - // accept that for now since warnings during a schema upgrade would - // be pretty weird anyway, since this operation is supposed to seem - // invisible to the user. 
- return nil, diags.Err() - } - - obj, err := src.Decode(schema.ImpliedType()) - if err != nil { - return nil, err - } - - if n.Output != nil { - *n.Output = obj - } - return obj, nil -} - -// EvalReadStateDeposed is an EvalNode implementation that reads the -// deposed InstanceState for a specific resource out of the state -type EvalReadStateDeposed struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // Key identifies which deposed object we will read. - Key states.DeposedKey - - // ProviderSchema is the schema for the provider given in Provider. - ProviderSchema **ProviderSchema - - // Provider is the provider that will subsequently perform actions on - // the the state object. This is used to perform any schema upgrades - // that might be required to prepare the stored data for use. - Provider *providers.Interface - - // Output will be written with a pointer to the retrieved object. - Output **states.ResourceInstanceObject -} - -func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) { - if n.Provider == nil || *n.Provider == nil { - panic("EvalReadStateDeposed used with no Provider object") - } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - panic("EvalReadStateDeposed used with no ProviderSchema object") - } - - key := n.Key - if key == states.NotDeposed { - return nil, fmt.Errorf("EvalReadStateDeposed used with no instance key; this is a bug in Terraform and should be reported") - } - absAddr := n.Addr.Absolute(ctx.Path()) - log.Printf("[TRACE] EvalReadStateDeposed: reading state for %s deposed object %s", absAddr, n.Key) - - src := ctx.State().ResourceInstanceObject(absAddr, key) - if src == nil { - // Presumably we only have deposed objects, then. 
- log.Printf("[TRACE] EvalReadStateDeposed: no state present for %s deposed object %s", absAddr, n.Key) - return nil, nil - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // Shouldn't happen since we should've failed long ago if no schema is present - return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) - } - var diags tfdiags.Diagnostics - src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) - if diags.HasErrors() { - // Note that we don't have any channel to return warnings here. We'll - // accept that for now since warnings during a schema upgrade would - // be pretty weird anyway, since this operation is supposed to seem - // invisible to the user. - return nil, diags.Err() - } - - obj, err := src.Decode(schema.ImpliedType()) - if err != nil { - return nil, err - } - if n.Output != nil { - *n.Output = obj - } - return obj, nil -} - -// UpdateStateHook calls the PostStateUpdate hook with the current state. -func UpdateStateHook(ctx EvalContext) error { - // In principle we could grab the lock here just long enough to take a - // deep copy and then pass that to our hooks below, but we'll instead - // hold the hook for the duration to avoid the potential confusing - // situation of us racing to call PostStateUpdate concurrently with - // different state snapshots. - stateSync := ctx.State() - state := stateSync.Lock().DeepCopy() - defer stateSync.Unlock() - - // Call the hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostStateUpdate(state) - }) - return err -} - -// evalWriteEmptyState wraps EvalWriteState to specifically record an empty -// state for a particular object. 
-type evalWriteEmptyState struct { - EvalWriteState -} - -func (n *evalWriteEmptyState) Eval(ctx EvalContext) (interface{}, error) { - var state *states.ResourceInstanceObject - n.State = &state - return n.EvalWriteState.Eval(ctx) -} - -// EvalWriteState is an EvalNode implementation that saves the given object -// as the current object for the selected resource instance. -type EvalWriteState struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // State is the object state to save. - State **states.ResourceInstanceObject - - // ProviderSchema is the schema for the provider given in ProviderAddr. - ProviderSchema **ProviderSchema - - // ProviderAddr is the address of the provider configuration that - // produced the given object. - ProviderAddr addrs.AbsProviderConfig - - // Dependencies are the inter-resource dependencies to be stored in the - // state. - Dependencies *[]addrs.ConfigResource - - // targetState determines which context state we're writing to during plan. - // The default is the global working state. - targetState phaseState -} - -func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) { - if n.State == nil { - // Note that a pointer _to_ nil is valid here, indicating the total - // absense of an object as we'd see during destroy. - panic("EvalWriteState used with no ResourceInstanceObject") - } - - absAddr := n.Addr.Absolute(ctx.Path()) - - var state *states.SyncState - switch n.targetState { - case refreshState: - log.Printf("[TRACE] EvalWriteState: using RefreshState for %s", absAddr) - state = ctx.RefreshState() - default: - state = ctx.State() - } - - if n.ProviderAddr.Provider.Type == "" { - return nil, fmt.Errorf("failed to write state for %s: missing provider type", absAddr) - } - obj := *n.State - if obj == nil || obj.Value.IsNull() { - // No need to encode anything: we'll just write it directly. 
- state.SetResourceInstanceCurrent(absAddr, nil, n.ProviderAddr) - log.Printf("[TRACE] EvalWriteState: removing state object for %s", absAddr) - return nil, nil - } - - // store the new deps in the state - if n.Dependencies != nil { - log.Printf("[TRACE] EvalWriteState: recording %d dependencies for %s", len(*n.Dependencies), absAddr) - obj.Dependencies = *n.Dependencies - } - - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - // Should never happen, unless our state object is nil - panic("EvalWriteState used with pointer to nil ProviderSchema object") - } - - if obj != nil { - log.Printf("[TRACE] EvalWriteState: writing current state object for %s", absAddr) - } else { - log.Printf("[TRACE] EvalWriteState: removing current state object for %s", absAddr) - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // It shouldn't be possible to get this far in any real scenario - // without a schema, but we might end up here in contrived tests that - // fail to set up their world properly. - return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) - } - src, err := obj.Encode(schema.ImpliedType(), currentVersion) - if err != nil { - return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err) - } - - state.SetResourceInstanceCurrent(absAddr, src, n.ProviderAddr) - return nil, nil -} - -// EvalWriteStateDeposed is an EvalNode implementation that writes -// an InstanceState out to the Deposed list of a resource in the state. -type EvalWriteStateDeposed struct { - // Addr is the address of the instance to read state for. - Addr addrs.ResourceInstance - - // Key indicates which deposed object to write to. - Key states.DeposedKey - - // State is the object state to save. - State **states.ResourceInstanceObject - - // ProviderSchema is the schema for the provider given in ProviderAddr. 
- ProviderSchema **ProviderSchema - - // ProviderAddr is the address of the provider configuration that - // produced the given object. - ProviderAddr addrs.AbsProviderConfig -} - -func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) { - if n.State == nil { - // Note that a pointer _to_ nil is valid here, indicating the total - // absense of an object as we'd see during destroy. - panic("EvalWriteStateDeposed used with no ResourceInstanceObject") - } - - absAddr := n.Addr.Absolute(ctx.Path()) - key := n.Key - state := ctx.State() - - if key == states.NotDeposed { - // should never happen - return nil, fmt.Errorf("can't save deposed object for %s without a deposed key; this is a bug in Terraform that should be reported", absAddr) - } - - obj := *n.State - if obj == nil { - // No need to encode anything: we'll just write it directly. - state.SetResourceInstanceDeposed(absAddr, key, nil, n.ProviderAddr) - log.Printf("[TRACE] EvalWriteStateDeposed: removing state object for %s deposed %s", absAddr, key) - return nil, nil - } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - // Should never happen, unless our state object is nil - panic("EvalWriteStateDeposed used with no ProviderSchema object") - } - - schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) - if schema == nil { - // It shouldn't be possible to get this far in any real scenario - // without a schema, but we might end up here in contrived tests that - // fail to set up their world properly. 
- return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) - } - src, err := obj.Encode(schema.ImpliedType(), currentVersion) - if err != nil { - return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err) - } - - log.Printf("[TRACE] EvalWriteStateDeposed: writing state object for %s deposed %s", absAddr, key) - state.SetResourceInstanceDeposed(absAddr, key, src, n.ProviderAddr) - return nil, nil -} - -// EvalDeposeState is an EvalNode implementation that moves the current object -// for the given instance to instead be a deposed object, leaving the instance -// with no current object. -// This is used at the beginning of a create-before-destroy replace action so -// that the create can create while preserving the old state of the -// to-be-destroyed object. -type EvalDeposeState struct { - Addr addrs.ResourceInstance - - // ForceKey, if a value other than states.NotDeposed, will be used as the - // key for the newly-created deposed object that results from this action. - // If set to states.NotDeposed (the zero value), a new unique key will be - // allocated. - ForceKey states.DeposedKey - - // OutputKey, if non-nil, will be written with the deposed object key that - // was generated for the object. This can then be passed to - // EvalUndeposeState.Key so it knows which deposed instance to forget. 
- OutputKey *states.DeposedKey -} - -// TODO: test -func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - state := ctx.State() - - var key states.DeposedKey - if n.ForceKey == states.NotDeposed { - key = state.DeposeResourceInstanceObject(absAddr) - } else { - key = n.ForceKey - state.DeposeResourceInstanceObjectForceKey(absAddr, key) - } - log.Printf("[TRACE] EvalDeposeState: prior object for %s now deposed with key %s", absAddr, key) - - if n.OutputKey != nil { - *n.OutputKey = key - } - - return nil, nil -} - -// EvalMaybeRestoreDeposedObject is an EvalNode implementation that will -// restore a particular deposed object of the specified resource instance -// to be the "current" object if and only if the instance doesn't currently -// have a current object. -// -// This is intended for use when the create leg of a create before destroy -// fails with no partial new object: if we didn't take any action, the user -// would be left in the unfortunate situation of having no current object -// and the previously-workign object now deposed. This EvalNode causes a -// better outcome by restoring things to how they were before the replace -// operation began. -// -// The create operation may have produced a partial result even though it -// failed and it's important that we don't "forget" that state, so in that -// situation the prior object remains deposed and the partial new object -// remains the current object, allowing the situation to hopefully be -// improved in a subsequent run. -type EvalMaybeRestoreDeposedObject struct { - Addr addrs.ResourceInstance - - // PlannedChange might be the action we're performing that includes - // the possiblity of restoring a deposed object. However, it might also - // be nil. It's here only for use in error messages and must not be - // used for business logic. 
- PlannedChange **plans.ResourceInstanceChange - - // Key is a pointer to the deposed object key that should be forgotten - // from the state, which must be non-nil. - Key *states.DeposedKey -} - -// TODO: test -func (n *EvalMaybeRestoreDeposedObject) Eval(ctx EvalContext) (interface{}, error) { - absAddr := n.Addr.Absolute(ctx.Path()) - dk := *n.Key - state := ctx.State() - - if dk == states.NotDeposed { - // This should never happen, and so it always indicates a bug. - // We should evaluate this node only if we've previously deposed - // an object as part of the same operation. - var diags tfdiags.Diagnostics - if n.PlannedChange != nil && *n.PlannedChange != nil { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Attempt to restore non-existent deposed object", - fmt.Sprintf( - "Terraform has encountered a bug where it would need to restore a deposed object for %s without knowing a deposed object key for that object. This occurred during a %s action. This is a bug in Terraform; please report it!", - absAddr, (*n.PlannedChange).Action, - ), - )) - } else { - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Attempt to restore non-existent deposed object", - fmt.Sprintf( - "Terraform has encountered a bug where it would need to restore a deposed object for %s without knowing a deposed object key for that object. This is a bug in Terraform; please report it!", - absAddr, - ), - )) - } - return nil, diags.Err() - } - - restored := state.MaybeRestoreResourceInstanceDeposed(absAddr, dk) - if restored { - log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s was restored as the current object", absAddr, dk) - } else { - log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s remains deposed", absAddr, dk) - } - - return nil, nil -} - -// EvalRefreshLifecycle is an EvalNode implementation that updates -// the status of the lifecycle options stored in the state. 
-// This currently only applies to create_before_destroy. -type EvalRefreshLifecycle struct { - Addr addrs.AbsResourceInstance - - Config *configs.Resource - // Prior State - State **states.ResourceInstanceObject - // ForceCreateBeforeDestroy indicates a create_before_destroy resource - // depends on this resource. - ForceCreateBeforeDestroy bool -} - -func (n *EvalRefreshLifecycle) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - if state == nil { - // no existing state - return nil, nil - } - - // In 0.13 we could be refreshing a resource with no config. - // We should be operating on managed resource, but check here to be certain - if n.Config == nil || n.Config.Managed == nil { - log.Printf("[WARN] EvalRefreshLifecycle: no Managed config value found in instance state for %q", n.Addr) - return nil, nil - } - - state.CreateBeforeDestroy = n.Config.Managed.CreateBeforeDestroy || n.ForceCreateBeforeDestroy - - return nil, nil -} diff --git a/terraform/eval_state_test.go b/terraform/eval_state_test.go deleted file mode 100644 index a9c73cded..000000000 --- a/terraform/eval_state_test.go +++ /dev/null @@ -1,277 +0,0 @@ -package terraform - -import ( - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/states" -) - -func TestEvalReadState(t *testing.T) { - var output *states.ResourceInstanceObject - mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Optional: true, - }, - }, - }) - providerSchema := mockProvider.GetSchemaReturn - provider := providers.Interface(mockProvider) - - cases := map[string]struct { - Resources map[string]*ResourceState - Node *EvalReadState - ExpectedInstanceId string - }{ - "ReadState gets primary 
instance state": { - Resources: map[string]*ResourceState{ - "aws_instance.bar": &ResourceState{ - Primary: &InstanceState{ - ID: "i-abc123", - }, - }, - }, - Node: &EvalReadState{ - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "bar", - }.Instance(addrs.NoKey), - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &output, - }, - ExpectedInstanceId: "i-abc123", - }, - } - - for k, c := range cases { - t.Run(k, func(t *testing.T) { - ctx := new(MockEvalContext) - state := MustShimLegacyState(&State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: c.Resources, - }, - }, - }) - ctx.StateState = state.SyncWrapper() - ctx.PathPath = addrs.RootModuleInstance - - result, err := c.Node.Eval(ctx) - if err != nil { - t.Fatalf("[%s] Got err: %#v", k, err) - } - - expected := c.ExpectedInstanceId - if !(result != nil && instanceObjectIdForTests(result.(*states.ResourceInstanceObject)) == expected) { - t.Fatalf("[%s] Expected return with ID %#v, got: %#v", k, expected, result) - } - - if !(output != nil && output.Value.GetAttr("id") == cty.StringVal(expected)) { - t.Fatalf("[%s] Expected output with ID %#v, got: %#v", k, expected, output) - } - - output = nil - }) - } -} - -func TestEvalReadStateDeposed(t *testing.T) { - var output *states.ResourceInstanceObject - mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Optional: true, - }, - }, - }) - providerSchema := mockProvider.GetSchemaReturn - provider := providers.Interface(mockProvider) - - cases := map[string]struct { - Resources map[string]*ResourceState - Node *EvalReadStateDeposed - ExpectedInstanceId string - }{ - "ReadStateDeposed gets deposed instance": { - Resources: map[string]*ResourceState{ - "aws_instance.bar": &ResourceState{ - Deposed: []*InstanceState{ - &InstanceState{ID: "i-abc123"}, - }, - }, - }, - 
Node: &EvalReadStateDeposed{ - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "bar", - }.Instance(addrs.NoKey), - Key: states.DeposedKey("00000001"), // shim from legacy state assigns 0th deposed index this key - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &output, - }, - ExpectedInstanceId: "i-abc123", - }, - } - for k, c := range cases { - t.Run(k, func(t *testing.T) { - ctx := new(MockEvalContext) - state := MustShimLegacyState(&State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: rootModulePath, - Resources: c.Resources, - }, - }, - }) - ctx.StateState = state.SyncWrapper() - ctx.PathPath = addrs.RootModuleInstance - - result, err := c.Node.Eval(ctx) - if err != nil { - t.Fatalf("[%s] Got err: %#v", k, err) - } - - expected := c.ExpectedInstanceId - if !(result != nil && instanceObjectIdForTests(result.(*states.ResourceInstanceObject)) == expected) { - t.Fatalf("[%s] Expected return with ID %#v, got: %#v", k, expected, result) - } - - if !(output != nil && output.Value.GetAttr("id") == cty.StringVal(expected)) { - t.Fatalf("[%s] Expected output with ID %#v, got: %#v", k, expected, output) - } - - output = nil - }) - } -} - -func TestEvalWriteState(t *testing.T) { - state := states.NewState() - ctx := new(MockEvalContext) - ctx.StateState = state.SyncWrapper() - ctx.PathPath = addrs.RootModuleInstance - - mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Optional: true, - }, - }, - }) - providerSchema := mockProvider.GetSchemaReturn - - obj := &states.ResourceInstanceObject{ - Value: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-abc123"), - }), - Status: states.ObjectReady, - } - node := &EvalWriteState{ - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.NoKey), - - State: &obj, - - 
ProviderSchema: &providerSchema, - ProviderAddr: addrs.RootModuleInstance.ProviderConfigDefault(addrs.NewDefaultProvider("aws")), - } - _, err := node.Eval(ctx) - if err != nil { - t.Fatalf("Got err: %#v", err) - } - - checkStateString(t, state, ` -aws_instance.foo: - ID = i-abc123 - provider = provider["registry.terraform.io/hashicorp/aws"] - `) -} - -func TestEvalWriteStateDeposed(t *testing.T) { - state := states.NewState() - ctx := new(MockEvalContext) - ctx.StateState = state.SyncWrapper() - ctx.PathPath = addrs.RootModuleInstance - - mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Optional: true, - }, - }, - }) - providerSchema := mockProvider.GetSchemaReturn - - obj := &states.ResourceInstanceObject{ - Value: cty.ObjectVal(map[string]cty.Value{ - "id": cty.StringVal("i-abc123"), - }), - Status: states.ObjectReady, - } - node := &EvalWriteStateDeposed{ - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.NoKey), - Key: states.DeposedKey("deadbeef"), - - State: &obj, - - ProviderSchema: &providerSchema, - ProviderAddr: addrs.RootModuleInstance.ProviderConfigDefault(addrs.NewDefaultProvider("aws")), - } - _, err := node.Eval(ctx) - if err != nil { - t.Fatalf("Got err: %#v", err) - } - - checkStateString(t, state, ` -aws_instance.foo: (1 deposed) - ID = - provider = provider["registry.terraform.io/hashicorp/aws"] - Deposed ID 1 = i-abc123 - `) -} - -func TestUpdateStateHook(t *testing.T) { - mockHook := new(MockHook) - - state := states.NewState() - state.Module(addrs.RootModuleInstance).SetLocalValue("foo", cty.StringVal("hello")) - - ctx := new(MockEvalContext) - ctx.HookHook = mockHook - ctx.StateState = state.SyncWrapper() - - if err := UpdateStateHook(ctx); err != nil { - t.Fatalf("err: %s", err) - } - - if !mockHook.PostStateUpdateCalled { - t.Fatal("should call 
PostStateUpdate") - } - if mockHook.PostStateUpdateState.LocalValue(addrs.LocalValue{Name: "foo"}.Absolute(addrs.RootModuleInstance)) != cty.StringVal("hello") { - t.Fatalf("wrong state passed to hook: %s", spew.Sdump(mockHook.PostStateUpdateState)) - } -} diff --git a/terraform/eval_validate.go b/terraform/eval_validate.go deleted file mode 100644 index c9f47b1b7..000000000 --- a/terraform/eval_validate.go +++ /dev/null @@ -1,609 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/configs" - "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/providers" - "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/tfdiags" - "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" - "github.com/zclconf/go-cty/cty/gocty" -) - -// EvalValidateCount is an EvalNode implementation that validates -// the count of a resource. -type EvalValidateCount struct { - Resource *configs.Resource -} - -// TODO: test -func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) { - var diags tfdiags.Diagnostics - var count int - var err error - - val, valDiags := ctx.EvaluateExpr(n.Resource.Count, cty.Number, nil) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - goto RETURN - } - if val.IsNull() || !val.IsKnown() { - goto RETURN - } - - err = gocty.FromCtyValue(val, &count) - if err != nil { - // The EvaluateExpr call above already guaranteed us a number value, - // so if we end up here then we have something that is out of range - // for an int, and the error message will include a description of - // the valid range. 
- rawVal := val.AsBigFloat() - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count value", - Detail: fmt.Sprintf("The number %s is not a valid count value: %s.", rawVal, err), - Subject: n.Resource.Count.Range().Ptr(), - }) - } else if count < 0 { - rawVal := val.AsBigFloat() - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count value", - Detail: fmt.Sprintf("The number %s is not a valid count value: count must not be negative.", rawVal), - Subject: n.Resource.Count.Range().Ptr(), - }) - } - -RETURN: - return nil, diags.NonFatalErr() -} - -// EvalValidateProvisioner validates the configuration of a provisioner -// belonging to a resource. The provisioner config is expected to contain the -// merged connection configurations. -type EvalValidateProvisioner struct { - ResourceAddr addrs.Resource - Provisioner *provisioners.Interface - Schema **configschema.Block - Config *configs.Provisioner - ResourceHasCount bool - ResourceHasForEach bool -} - -func (n *EvalValidateProvisioner) Validate(ctx EvalContext) error { - provisioner := *n.Provisioner - config := *n.Config - schema := *n.Schema - - var diags tfdiags.Diagnostics - - // Validate the provisioner's own config first - configVal, _, configDiags := n.evaluateBlock(ctx, config.Config, schema) - diags = diags.Append(configDiags) - if configDiags.HasErrors() { - return diags.Err() - } - - if configVal == cty.NilVal { - // Should never happen for a well-behaved EvaluateBlock implementation - return fmt.Errorf("EvaluateBlock returned nil value") - } - - req := provisioners.ValidateProvisionerConfigRequest{ - Config: configVal, - } - - resp := provisioner.ValidateProvisionerConfig(req) - diags = diags.Append(resp.Diagnostics) - - // Now validate the connection config, which contains the merged bodies - // of the resource and provisioner connection blocks. 
- connDiags := n.validateConnConfig(ctx, config.Connection, n.ResourceAddr) - diags = diags.Append(connDiags) - - return diags.NonFatalErr() -} - -func (n *EvalValidateProvisioner) validateConnConfig(ctx EvalContext, config *configs.Connection, self addrs.Referenceable) tfdiags.Diagnostics { - // We can't comprehensively validate the connection config since its - // final structure is decided by the communicator and we can't instantiate - // that until we have a complete instance state. However, we *can* catch - // configuration keys that are not valid for *any* communicator, catching - // typos early rather than waiting until we actually try to run one of - // the resource's provisioners. - - var diags tfdiags.Diagnostics - - if config == nil || config.Config == nil { - // No block to validate - return diags - } - - // We evaluate here just by evaluating the block and returning any - // diagnostics we get, since evaluation alone is enough to check for - // extraneous arguments and incorrectly-typed arguments. - _, _, configDiags := n.evaluateBlock(ctx, config.Config, connectionBlockSupersetSchema) - diags = diags.Append(configDiags) - - return diags -} - -func (n *EvalValidateProvisioner) evaluateBlock(ctx EvalContext, body hcl.Body, schema *configschema.Block) (cty.Value, hcl.Body, tfdiags.Diagnostics) { - keyData := EvalDataForNoInstanceKey - selfAddr := n.ResourceAddr.Instance(addrs.NoKey) - - if n.ResourceHasCount { - // For a resource that has count, we allow count.index but don't - // know at this stage what it will return. - keyData = InstanceKeyEvalData{ - CountIndex: cty.UnknownVal(cty.Number), - } - - // "self" can't point to an unknown key, but we'll force it to be - // key 0 here, which should return an unknown value of the - // expected type since none of these elements are known at this - // point anyway. 
- selfAddr = n.ResourceAddr.Instance(addrs.IntKey(0)) - } else if n.ResourceHasForEach { - // For a resource that has for_each, we allow each.value and each.key - // but don't know at this stage what it will return. - keyData = InstanceKeyEvalData{ - EachKey: cty.UnknownVal(cty.String), - EachValue: cty.DynamicVal, - } - - // "self" can't point to an unknown key, but we'll force it to be - // key "" here, which should return an unknown value of the - // expected type since none of these elements are known at - // this point anyway. - selfAddr = n.ResourceAddr.Instance(addrs.StringKey("")) - } - - return ctx.EvaluateBlock(body, schema, selfAddr, keyData) -} - -// connectionBlockSupersetSchema is a schema representing the superset of all -// possible arguments for "connection" blocks across all supported connection -// types. -// -// This currently lives here because we've not yet updated our communicator -// subsystem to be aware of schema itself. Once that is done, we can remove -// this and use a type-specific schema from the communicator to validate -// exactly what is expected for a given connection type. -var connectionBlockSupersetSchema = &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - // NOTE: "type" is not included here because it's treated special - // by the config loader and stored away in a separate field. 
- - // Common attributes for both connection types - "host": { - Type: cty.String, - Required: true, - }, - "type": { - Type: cty.String, - Optional: true, - }, - "user": { - Type: cty.String, - Optional: true, - }, - "password": { - Type: cty.String, - Optional: true, - }, - "port": { - Type: cty.String, - Optional: true, - }, - "timeout": { - Type: cty.String, - Optional: true, - }, - "script_path": { - Type: cty.String, - Optional: true, - }, - - // For type=ssh only (enforced in ssh communicator) - "private_key": { - Type: cty.String, - Optional: true, - }, - "certificate": { - Type: cty.String, - Optional: true, - }, - "host_key": { - Type: cty.String, - Optional: true, - }, - "agent": { - Type: cty.Bool, - Optional: true, - }, - "agent_identity": { - Type: cty.String, - Optional: true, - }, - "bastion_host": { - Type: cty.String, - Optional: true, - }, - "bastion_host_key": { - Type: cty.String, - Optional: true, - }, - "bastion_port": { - Type: cty.Number, - Optional: true, - }, - "bastion_user": { - Type: cty.String, - Optional: true, - }, - "bastion_password": { - Type: cty.String, - Optional: true, - }, - "bastion_private_key": { - Type: cty.String, - Optional: true, - }, - "bastion_certificate": { - Type: cty.String, - Optional: true, - }, - - // For type=winrm only (enforced in winrm communicator) - "https": { - Type: cty.Bool, - Optional: true, - }, - "insecure": { - Type: cty.Bool, - Optional: true, - }, - "cacert": { - Type: cty.String, - Optional: true, - }, - "use_ntlm": { - Type: cty.Bool, - Optional: true, - }, - }, -} - -// connectionBlockSupersetSchema is a schema representing the superset of all -// possible arguments for "connection" blocks across all supported connection -// types. -// -// This currently lives here because we've not yet updated our communicator -// subsystem to be aware of schema itself. It's exported only for use in the -// configs/configupgrade package and should not be used from anywhere else. 
-// The caller may not modify any part of the returned schema data structure. -func ConnectionBlockSupersetSchema() *configschema.Block { - return connectionBlockSupersetSchema -} - -// EvalValidateResource validates the configuration of a resource. -type EvalValidateResource struct { - Addr addrs.Resource - Provider *providers.Interface - ProviderSchema **ProviderSchema - Config *configs.Resource - ProviderMetas map[addrs.Provider]*configs.ProviderMeta - - // IgnoreWarnings means that warnings will not be passed through. This allows - // "just-in-time" passes of validation to continue execution through warnings. - IgnoreWarnings bool - - // ConfigVal, if non-nil, will be updated with the value resulting from - // evaluating the given configuration body. Since validation is performed - // very early, this value is likely to contain lots of unknown values, - // but its type will conform to the schema of the resource type associated - // with the resource instance being validated. - ConfigVal *cty.Value -} - -func (n *EvalValidateResource) Validate(ctx EvalContext) error { - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - return fmt.Errorf("EvalValidateResource has nil schema for %s", n.Addr) - } - - var diags tfdiags.Diagnostics - provider := *n.Provider - cfg := *n.Config - schema := *n.ProviderSchema - mode := cfg.Mode - - keyData := EvalDataForNoInstanceKey - - switch { - case n.Config.Count != nil: - // If the config block has count, we'll evaluate with an unknown - // number as count.index so we can still type check even though - // we won't expand count until the plan phase. - keyData = InstanceKeyEvalData{ - CountIndex: cty.UnknownVal(cty.Number), - } - - // Basic type-checking of the count argument. More complete validation - // of this will happen when we DynamicExpand during the plan walk. 
- countDiags := n.validateCount(ctx, n.Config.Count) - diags = diags.Append(countDiags) - - case n.Config.ForEach != nil: - keyData = InstanceKeyEvalData{ - EachKey: cty.UnknownVal(cty.String), - EachValue: cty.UnknownVal(cty.DynamicPseudoType), - } - - // Evaluate the for_each expression here so we can expose the diagnostics - forEachDiags := n.validateForEach(ctx, n.Config.ForEach) - diags = diags.Append(forEachDiags) - } - - diags = diags.Append(validateDependsOn(ctx, n.Config.DependsOn)) - - // Validate the provider_meta block for the provider this resource - // belongs to, if there is one. - // - // Note: this will return an error for every resource a provider - // uses in a module, if the provider_meta for that module is - // incorrect. The only way to solve this that we've foudn is to - // insert a new ProviderMeta graph node in the graph, and make all - // that provider's resources in the module depend on the node. That's - // an awful heavy hammer to swing for this feature, which should be - // used only in limited cases with heavy coordination with the - // Terraform team, so we're going to defer that solution for a future - // enhancement to this functionality. 
- /* - if n.ProviderMetas != nil { - if m, ok := n.ProviderMetas[n.ProviderAddr.ProviderConfig.Type]; ok && m != nil { - // if the provider doesn't support this feature, throw an error - if (*n.ProviderSchema).ProviderMeta == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: fmt.Sprintf("Provider %s doesn't support provider_meta", cfg.ProviderConfigAddr()), - Detail: fmt.Sprintf("The resource %s belongs to a provider that doesn't support provider_meta blocks", n.Addr), - Subject: &m.ProviderRange, - }) - } else { - _, _, metaDiags := ctx.EvaluateBlock(m.Config, (*n.ProviderSchema).ProviderMeta, nil, EvalDataForNoInstanceKey) - diags = diags.Append(metaDiags) - } - } - } - */ - // BUG(paddy): we're not validating provider_meta blocks on EvalValidate right now - // because the ProviderAddr for the resource isn't available on the EvalValidate - // struct. - - // Provider entry point varies depending on resource mode, because - // managed resources and data resources are two distinct concepts - // in the provider abstraction. - switch mode { - case addrs.ManagedResourceMode: - schema, _ := schema.SchemaForResourceType(mode, cfg.Type) - if schema == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid resource type", - Detail: fmt.Sprintf("The provider %s does not support resource type %q.", cfg.ProviderConfigAddr(), cfg.Type), - Subject: &cfg.TypeRange, - }) - return diags.Err() - } - - configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - return diags.Err() - } - - if cfg.Managed != nil { // can be nil only in tests with poorly-configured mocks - for _, traversal := range cfg.Managed.IgnoreChanges { - // This will error out if the traversal contains an invalid - // index step. That is OK if we want users to be able to ignore - // a key that is no longer specified in the config. 
- moreDiags := schema.StaticValidateTraversal(traversal) - diags = diags.Append(moreDiags) - if diags.HasErrors() { - continue - } - - // first check to see if this assigned in the config - v, _ := traversal.TraverseRel(configVal) - if !v.IsNull() { - // it's assigned, so we can also assume it's not computed-only - continue - } - - // We can't ignore changes that don't exist in the configuration. - // We're not checking specifically if the traversal resolves to - // a computed-only value, but we can hint to the user that it - // might also be the case. - sourceRange := traversal.SourceRange() - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Cannot ignore argument not set in the configuration", - Detail: fmt.Sprintf("The ignore_changes argument is not set in the configuration.\n" + - "The ignore_changes mechanism only applies to changes " + - "within the configuration, and must be used with " + - "arguments set in the configuration and not computed by " + - "the provider.", - ), - Subject: &sourceRange, - }) - return diags.Err() - } - } - - req := providers.ValidateResourceTypeConfigRequest{ - TypeName: cfg.Type, - Config: configVal, - } - - resp := provider.ValidateResourceTypeConfig(req) - diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) - - if n.ConfigVal != nil { - *n.ConfigVal = configVal - } - - case addrs.DataResourceMode: - schema, _ := schema.SchemaForResourceType(mode, cfg.Type) - if schema == nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid data source", - Detail: fmt.Sprintf("The provider %s does not support data source %q.", cfg.ProviderConfigAddr(), cfg.Type), - Subject: &cfg.TypeRange, - }) - return diags.Err() - } - - configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) - diags = diags.Append(valDiags) - if valDiags.HasErrors() { - return diags.Err() - } - - req := providers.ValidateDataSourceConfigRequest{ - TypeName: cfg.Type, - Config: 
configVal, - } - - resp := provider.ValidateDataSourceConfig(req) - diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) - } - - if n.IgnoreWarnings { - // If we _only_ have warnings then we'll return nil. - if diags.HasErrors() { - return diags.NonFatalErr() - } - return nil - } else { - // We'll return an error if there are any diagnostics at all, even if - // some of them are warnings. - return diags.NonFatalErr() - } -} - -func (n *EvalValidateResource) validateCount(ctx EvalContext, expr hcl.Expression) tfdiags.Diagnostics { - if expr == nil { - return nil - } - - var diags tfdiags.Diagnostics - - countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) - diags = diags.Append(countDiags) - if diags.HasErrors() { - return diags - } - - if countVal.IsNull() { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is null. An integer is required.`, - Subject: expr.Range().Ptr(), - }) - return diags - } - - var err error - countVal, err = convert.Convert(countVal, cty.Number) - if err != nil { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), - Subject: expr.Range().Ptr(), - }) - return diags - } - - // If the value isn't known then that's the best we can do for now, but - // we'll check more thoroughly during the plan walk. - if !countVal.IsKnown() { - return diags - } - - // If we _do_ know the value, then we can do a few more checks here. - var count int - err = gocty.FromCtyValue(countVal, &count) - if err != nil { - // Isn't a whole number, etc. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), - Subject: expr.Range().Ptr(), - }) - return diags - } - - if count < 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid count argument", - Detail: `The given "count" argument value is unsuitable: count cannot be negative.`, - Subject: expr.Range().Ptr(), - }) - return diags - } - - return diags -} - -func (n *EvalValidateResource) validateForEach(ctx EvalContext, expr hcl.Expression) (diags tfdiags.Diagnostics) { - val, forEachDiags := evaluateForEachExpressionValue(expr, ctx) - // If the value isn't known then that's the best we can do for now, but - // we'll check more thoroughly during the plan walk - if !val.IsKnown() { - return diags - } - - if forEachDiags.HasErrors() { - diags = diags.Append(forEachDiags) - } - - return diags -} - -func validateDependsOn(ctx EvalContext, dependsOn []hcl.Traversal) (diags tfdiags.Diagnostics) { - for _, traversal := range dependsOn { - ref, refDiags := addrs.ParseRef(traversal) - diags = diags.Append(refDiags) - if !refDiags.HasErrors() && len(ref.Remaining) != 0 { - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid depends_on reference", - Detail: "References in depends_on must be to a whole object (resource, etc), not to an attribute of an object.", - Subject: ref.Remaining.SourceRange().Ptr(), - }) - } - - // The ref must also refer to something that exists. To test that, - // we'll just eval it and count on the fact that our evaluator will - // detect references to non-existent objects. 
- if !diags.HasErrors() { - scope := ctx.EvaluationScope(nil, EvalDataForNoInstanceKey) - if scope != nil { // sometimes nil in tests, due to incomplete mocks - _, refDiags = scope.EvalReference(ref, cty.DynamicPseudoType) - diags = diags.Append(refDiags) - } - } - } - return diags -} diff --git a/terraform/eval_variable.go b/terraform/eval_variable.go index 72734b788..34669f966 100644 --- a/terraform/eval_variable.go +++ b/terraform/eval_variable.go @@ -18,14 +18,12 @@ import ( // This must be used only after any side-effects that make the value of the // variable available for use in expression evaluation, such as // EvalModuleCallArgument for variables in descendent modules. -func evalVariableValidations(addr addrs.AbsInputVariableInstance, config *configs.Variable, expr hcl.Expression, ctx EvalContext) error { +func evalVariableValidations(addr addrs.AbsInputVariableInstance, config *configs.Variable, expr hcl.Expression, ctx EvalContext) (diags tfdiags.Diagnostics) { if config == nil || len(config.Validations) == 0 { log.Printf("[TRACE] evalVariableValidations: not active for %s, so skipping", addr) return nil } - var diags tfdiags.Diagnostics - // Variable nodes evaluate in the parent module to where they were declared // because the value expression (n.Expr, if set) comes from the calling // "module" block in the parent module. @@ -83,6 +81,11 @@ func evalVariableValidations(addr addrs.AbsInputVariableInstance, config *config continue } + // Validation condition may be marked if the input variable is bound to + // a sensitive value. This is irrelevant to the validation process, so + // we discard the marks now. 
+ result, _ = result.Unmark() + if result.False() { if expr != nil { diags = diags.Append(&hcl.Diagnostic{ @@ -105,5 +108,5 @@ func evalVariableValidations(addr addrs.AbsInputVariableInstance, config *config } } - return diags.ErrWithWarnings() + return diags } diff --git a/terraform/evaluate.go b/terraform/evaluate.go index 3fd02772d..c8b49c66c 100644 --- a/terraform/evaluate.go +++ b/terraform/evaluate.go @@ -145,7 +145,7 @@ func (d *evaluationStateData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.Sou diags = diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: `Reference to "count" in non-counted context`, - Detail: fmt.Sprintf(`The "count" object can only be used in "module", "resource", and "data" blocks, and only when the "count" argument is set.`), + Detail: `The "count" object can only be used in "module", "resource", and "data" blocks, and only when the "count" argument is set.`, Subject: rng.ToHCL().Ptr(), }) return cty.UnknownVal(cty.Number), diags @@ -177,7 +177,7 @@ func (d *evaluationStateData) GetForEachAttr(addr addrs.ForEachAttr, rng tfdiags diags = diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: `each.value cannot be used in this context`, - Detail: fmt.Sprintf(`A reference to "each.value" has been used in a context in which it unavailable, such as when the configuration no longer contains the value in its "for_each" expression. Remove this reference to each.value in your configuration to work around this error.`), + Detail: `A reference to "each.value" has been used in a context in which it unavailable, such as when the configuration no longer contains the value in its "for_each" expression. 
Remove this reference to each.value in your configuration to work around this error.`, Subject: rng.ToHCL().Ptr(), }) return cty.UnknownVal(cty.DynamicPseudoType), diags @@ -196,7 +196,7 @@ func (d *evaluationStateData) GetForEachAttr(addr addrs.ForEachAttr, rng tfdiags diags = diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, Summary: `Reference to "each" in context without for_each`, - Detail: fmt.Sprintf(`The "each" object can be used only in "module" or "resource" blocks, and only when the "for_each" argument is set.`), + Detail: `The "each" object can be used only in "module" or "resource" blocks, and only when the "for_each" argument is set.`, Subject: rng.ToHCL().Ptr(), }) return cty.UnknownVal(cty.DynamicPseudoType), diags @@ -260,6 +260,10 @@ func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfd // being liberal in what it accepts because the subsequent plan walk has // more information available and so can be more conservative. if d.Operation == walkValidate { + // Ensure variable sensitivity is captured in the validate walk + if config.Sensitive { + return cty.UnknownVal(wantType).Mark("sensitive"), diags + } return cty.UnknownVal(wantType), diags } @@ -293,7 +297,8 @@ func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfd val = cty.UnknownVal(wantType) } - if config.Sensitive { + // Mark if sensitive, and avoid double-marking if this has already been marked + if config.Sensitive && !val.HasMark("sensitive") { val = val.Mark("sensitive") } @@ -428,7 +433,7 @@ func (d *evaluationStateData) GetModule(addr addrs.ModuleCall, rng tfdiags.Sourc instance[cfg.Name] = outputState - if cfg.Sensitive { + if cfg.Sensitive && !outputState.HasMark("sensitive") { instance[cfg.Name] = outputState.Mark("sensitive") } } @@ -457,7 +462,7 @@ func (d *evaluationStateData) GetModule(addr addrs.ModuleCall, rng tfdiags.Sourc instance[cfg.Name] = change.After - if change.Sensitive { + if change.Sensitive && 
!change.After.HasMark("sensitive") { instance[cfg.Name] = change.After.Mark("sensitive") } } diff --git a/terraform/evaluate_test.go b/terraform/evaluate_test.go index 2564a5c13..59202422d 100644 --- a/terraform/evaluate_test.go +++ b/terraform/evaluate_test.go @@ -99,11 +99,20 @@ func TestEvaluatorGetInputVariable(t *testing.T) { Sensitive: true, Default: cty.StringVal("foo"), }, + // Avoid double marking a value + "some_other_var": { + Name: "some_other_var", + Sensitive: true, + Default: cty.StringVal("bar"), + }, }, }, }, VariableValues: map[string]map[string]cty.Value{ - "": {"some_var": cty.StringVal("bar")}, + "": { + "some_var": cty.StringVal("bar"), + "some_other_var": cty.StringVal("boop").Mark("sensitive"), + }, }, VariableValuesLock: &sync.Mutex{}, } @@ -124,6 +133,18 @@ func TestEvaluatorGetInputVariable(t *testing.T) { if !got.RawEquals(want) { t.Errorf("wrong result %#v; want %#v", got, want) } + + want = cty.StringVal("boop").Mark("sensitive") + got, diags = scope.Data.GetInputVariable(addrs.InputVariable{ + Name: "some_other_var", + }, tfdiags.SourceRange{}) + + if len(diags) != 0 { + t.Errorf("unexpected diagnostics %s", spew.Sdump(diags)) + } + if !got.RawEquals(want) { + t.Errorf("wrong result %#v; want %#v", got, want) + } } func TestEvaluatorGetResource(t *testing.T) { diff --git a/terraform/evaluate_valid_test.go b/terraform/evaluate_valid_test.go index 9be7b278b..086ca3037 100644 --- a/terraform/evaluate_valid_test.go +++ b/terraform/evaluate_valid_test.go @@ -50,6 +50,14 @@ For example, to correlate with indices of a referring resource, use: aws_instance.count[count.index] - Unsupported attribute: This object has no argument, nested block, or exported attribute named "foo".`, }, + { + "boop_instance.yep", + ``, + }, + { + "boop_whatever.nope", + `Invalid resource type: A managed resource type "boop_whatever" is not supported by provider "registry.terraform.io/foobar/beep".`, + }, } cfg := testModule(t, "static-validate-refs") @@ -62,6 
+70,12 @@ For example, to correlate with indices of a referring resource, use: "aws_instance": {}, }, }, + addrs.MustParseProviderSourceString("foobar/beep"): { + ResourceTypes: map[string]*configschema.Block{ + // intentional mismatch between resource type prefix and provider type + "boop_instance": {}, + }, + }, }, }, } diff --git a/terraform/execute.go b/terraform/execute.go index 5bf06c4d0..6d038d9d4 100644 --- a/terraform/execute.go +++ b/terraform/execute.go @@ -1,9 +1,9 @@ package terraform +import "github.com/hashicorp/terraform/tfdiags" + // GraphNodeExecutable is the interface that graph nodes must implement to -// enable execution. This is an alternative to GraphNodeEvalable, which is in -// the process of being removed. A given graph node should _not_ implement both -// GraphNodeExecutable and GraphNodeEvalable. +// enable execution. type GraphNodeExecutable interface { - Execute(EvalContext, walkOperation) error + Execute(EvalContext, walkOperation) tfdiags.Diagnostics } diff --git a/terraform/graph.go b/terraform/graph.go index c690b356b..5fa1ff283 100644 --- a/terraform/graph.go +++ b/terraform/graph.go @@ -38,8 +38,7 @@ func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics { ctx := walker.EvalContext() // Walk the graph. 
- var walkFn dag.WalkFunc - walkFn = func(v dag.Vertex) (diags tfdiags.Diagnostics) { + walkFn := func(v dag.Vertex) (diags tfdiags.Diagnostics) { log.Printf("[TRACE] vertex %q: starting visit (%T)", dag.VertexName(v), v) defer func() { diff --git a/terraform/graph_builder.go b/terraform/graph_builder.go index 7d4bd88b6..0f740fad9 100644 --- a/terraform/graph_builder.go +++ b/terraform/graph_builder.go @@ -1,9 +1,7 @@ package terraform import ( - "fmt" "log" - "strings" "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/internal/logging" @@ -40,12 +38,6 @@ func (b *BasicGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Di } log.Printf("[TRACE] Executing graph transform %T", step) - stepName := fmt.Sprintf("%T", step) - dot := strings.LastIndex(stepName, ".") - if dot >= 0 { - stepName = stepName[dot+1:] - } - err := step.Transform(g) if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { log.Printf("[TRACE] Completed graph transform %T with new graph:\n%s ------", step, logging.Indent(thisStepStr)) diff --git a/terraform/graph_builder_apply.go b/terraform/graph_builder_apply.go index 5501319d8..b5e50e772 100644 --- a/terraform/graph_builder_apply.go +++ b/terraform/graph_builder_apply.go @@ -108,10 +108,6 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer { // Attach the configuration to any resources &AttachResourceConfigTransformer{Config: b.Config}, - // Provisioner-related transformations - &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, - &ProvisionerTransformer{}, - // add providers TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), @@ -162,7 +158,6 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer { // Close opened plugin connections &CloseProviderTransformer{}, - &CloseProvisionerTransformer{}, // close the root module &CloseRootModuleTransformer{}, diff --git a/terraform/graph_builder_apply_test.go 
b/terraform/graph_builder_apply_test.go index 3e1f6e29c..cf7076911 100644 --- a/terraform/graph_builder_apply_test.go +++ b/terraform/graph_builder_apply_test.go @@ -423,70 +423,6 @@ func TestApplyGraphBuilder_moduleDestroy(t *testing.T) { ) } -func TestApplyGraphBuilder_provisioner(t *testing.T) { - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("test_object.foo"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Create, - }, - }, - }, - } - - b := &ApplyGraphBuilder{ - Config: testModule(t, "graph-builder-apply-provisioner"), - Changes: changes, - Components: simpleMockComponentFactory(), - Schemas: simpleTestSchemas(), - } - - g, err := b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - testGraphContains(t, g, "provisioner.test") - testGraphHappensBefore( - t, g, - "provisioner.test", - "test_object.foo", - ) -} - -func TestApplyGraphBuilder_provisionerDestroy(t *testing.T) { - changes := &plans.Changes{ - Resources: []*plans.ResourceInstanceChangeSrc{ - { - Addr: mustResourceInstanceAddr("test_object.foo"), - ChangeSrc: plans.ChangeSrc{ - Action: plans.Delete, - }, - }, - }, - } - - b := &ApplyGraphBuilder{ - Config: testModule(t, "graph-builder-apply-provisioner"), - Changes: changes, - Components: simpleMockComponentFactory(), - Schemas: simpleTestSchemas(), - } - - g, err := b.Build(addrs.RootModuleInstance) - if err != nil { - t.Fatalf("err: %s", err) - } - - testGraphContains(t, g, "provisioner.test") - testGraphHappensBefore( - t, g, - "provisioner.test", - "test_object.foo (destroy)", - ) -} - func TestApplyGraphBuilder_targetModule(t *testing.T) { changes := &plans.Changes{ Resources: []*plans.ResourceInstanceChangeSrc{ @@ -784,7 +720,6 @@ module.child.test_object.create module.child.test_object.create (expand) module.child (expand) provider["registry.terraform.io/hashicorp/test"] - provisioner.test module.child.test_object.other 
module.child.test_object.create module.child.test_object.other (expand) @@ -795,13 +730,9 @@ provider["registry.terraform.io/hashicorp/test"] provider["registry.terraform.io/hashicorp/test"] (close) module.child.test_object.other test_object.other -provisioner.test -provisioner.test (close) - module.child.test_object.create root meta.count-boundary (EachMode fixup) provider["registry.terraform.io/hashicorp/test"] (close) - provisioner.test (close) test_object.create test_object.create (expand) test_object.create (expand) diff --git a/terraform/graph_builder_destroy_plan.go b/terraform/graph_builder_destroy_plan.go index d82162282..a16729507 100644 --- a/terraform/graph_builder_destroy_plan.go +++ b/terraform/graph_builder_destroy_plan.go @@ -12,7 +12,10 @@ import ( // planning a pure-destroy. // // Planning a pure destroy operation is simple because we can ignore most -// ordering configuration and simply reverse the state. +// ordering configuration and simply reverse the state. This graph mainly +// exists for targeting, because we need to walk the destroy dependencies to +// ensure we plan the required resources. Without the requirement for +// targeting, the plan could theoretically be created directly from the state. type DestroyPlanGraphBuilder struct { // Config is the configuration tree to build the plan from. Config *configs.Config @@ -72,6 +75,7 @@ func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer { State: b.State, }, + // Create the delete changes for root module outputs. &OutputTransformer{ Config: b.Config, Destroy: true, @@ -93,8 +97,6 @@ func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer { Schemas: b.Schemas, }, - // Target. Note we don't set "Destroy: true" here since we already - // created proper destroy ordering. 
&TargetsTransformer{Targets: b.Targets}, // Close opened plugin connections diff --git a/terraform/graph_builder_plan.go b/terraform/graph_builder_plan.go index 49184c2e2..2e3a35ccb 100644 --- a/terraform/graph_builder_plan.go +++ b/terraform/graph_builder_plan.go @@ -115,10 +115,6 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer { // Attach the configuration to any resources &AttachResourceConfigTransformer{Config: b.Config}, - // Provisioner-related transformations - &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, - &ProvisionerTransformer{}, - // add providers TransformProviders(b.Components.ResourceProviders(), b.ConcreteProvider, b.Config), diff --git a/terraform/graph_builder_plan_test.go b/terraform/graph_builder_plan_test.go index 84fec8740..3da3cf16a 100644 --- a/terraform/graph_builder_plan_test.go +++ b/terraform/graph_builder_plan_test.go @@ -16,23 +16,16 @@ func TestPlanGraphBuilder_impl(t *testing.T) { func TestPlanGraphBuilder(t *testing.T) { awsProvider := &MockProvider{ - GetSchemaReturn: &ProviderSchema{ - Provider: simpleTestSchema(), - ResourceTypes: map[string]*configschema.Block{ - "aws_security_group": simpleTestSchema(), - "aws_instance": simpleTestSchema(), - "aws_load_balancer": simpleTestSchema(), - }, - }, - } - openstackProvider := &MockProvider{ - GetSchemaReturn: &ProviderSchema{ - Provider: simpleTestSchema(), - ResourceTypes: map[string]*configschema.Block{ - "openstack_floating_ip": simpleTestSchema(), + GetSchemaResponse: &providers.GetSchemaResponse{ + Provider: providers.Schema{Block: simpleTestSchema()}, + ResourceTypes: map[string]providers.Schema{ + "aws_security_group": {Block: simpleTestSchema()}, + "aws_instance": {Block: simpleTestSchema()}, + "aws_load_balancer": {Block: simpleTestSchema()}, }, }, } + openstackProvider := mockProviderWithResourceTypeSchema("openstack_floating_ip", simpleTestSchema()) components := &basicComponentFactory{ providers: 
map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("aws"): providers.FactoryFixed(awsProvider), @@ -45,8 +38,8 @@ func TestPlanGraphBuilder(t *testing.T) { Components: components, Schemas: &Schemas{ Providers: map[addrs.Provider]*ProviderSchema{ - addrs.NewDefaultProvider("aws"): awsProvider.GetSchemaReturn, - addrs.NewDefaultProvider("openstack"): openstackProvider.GetSchemaReturn, + addrs.NewDefaultProvider("aws"): awsProvider.ProviderSchema(), + addrs.NewDefaultProvider("openstack"): openstackProvider.ProviderSchema(), }, }, } @@ -68,28 +61,22 @@ func TestPlanGraphBuilder(t *testing.T) { } func TestPlanGraphBuilder_dynamicBlock(t *testing.T) { - provider := &MockProvider{ - GetSchemaReturn: &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_thing": { + provider := mockProviderWithResourceTypeSchema("test_thing", &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "list": {Type: cty.List(cty.String), Computed: true}, + }, + BlockTypes: map[string]*configschema.NestedBlock{ + "nested": { + Nesting: configschema.NestingList, + Block: configschema.Block{ Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "list": {Type: cty.List(cty.String), Computed: true}, - }, - BlockTypes: map[string]*configschema.NestedBlock{ - "nested": { - Nesting: configschema.NestingList, - Block: configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "foo": {Type: cty.String, Optional: true}, - }, - }, - }, + "foo": {Type: cty.String, Optional: true}, }, }, }, }, - } + }) components := &basicComponentFactory{ providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("test"): providers.FactoryFixed(provider), @@ -101,7 +88,7 @@ func TestPlanGraphBuilder_dynamicBlock(t *testing.T) { Components: components, Schemas: &Schemas{ Providers: map[addrs.Provider]*ProviderSchema{ - addrs.NewDefaultProvider("test"): 
provider.GetSchemaReturn, + addrs.NewDefaultProvider("test"): provider.ProviderSchema(), }, }, } @@ -144,23 +131,17 @@ test_thing.c (expand) } func TestPlanGraphBuilder_attrAsBlocks(t *testing.T) { - provider := &MockProvider{ - GetSchemaReturn: &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_thing": { - Attributes: map[string]*configschema.Attribute{ - "id": {Type: cty.String, Computed: true}, - "nested": { - Type: cty.List(cty.Object(map[string]cty.Type{ - "foo": cty.String, - })), - Optional: true, - }, - }, - }, + provider := mockProviderWithResourceTypeSchema("test_thing", &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "nested": { + Type: cty.List(cty.Object(map[string]cty.Type{ + "foo": cty.String, + })), + Optional: true, }, }, - } + }) components := &basicComponentFactory{ providers: map[addrs.Provider]providers.Factory{ addrs.NewDefaultProvider("test"): providers.FactoryFixed(provider), @@ -172,7 +153,7 @@ func TestPlanGraphBuilder_attrAsBlocks(t *testing.T) { Components: components, Schemas: &Schemas{ Providers: map[addrs.Provider]*ProviderSchema{ - addrs.NewDefaultProvider("test"): provider.GetSchemaReturn, + addrs.NewDefaultProvider("test"): provider.ProviderSchema(), }, }, } @@ -233,14 +214,7 @@ func TestPlanGraphBuilder_targetModule(t *testing.T) { } func TestPlanGraphBuilder_forEach(t *testing.T) { - awsProvider := &MockProvider{ - GetSchemaReturn: &ProviderSchema{ - Provider: simpleTestSchema(), - ResourceTypes: map[string]*configschema.Block{ - "aws_instance": simpleTestSchema(), - }, - }, - } + awsProvider := mockProviderWithResourceTypeSchema("aws_instance", simpleTestSchema()) components := &basicComponentFactory{ providers: map[addrs.Provider]providers.Factory{ @@ -253,7 +227,7 @@ func TestPlanGraphBuilder_forEach(t *testing.T) { Components: components, Schemas: &Schemas{ Providers: map[addrs.Provider]*ProviderSchema{ - 
addrs.NewDefaultProvider("aws"): awsProvider.GetSchemaReturn, + addrs.NewDefaultProvider("aws"): awsProvider.ProviderSchema(), }, }, } diff --git a/terraform/graph_test.go b/terraform/graph_test.go index 7dac808e8..e61da9878 100644 --- a/terraform/graph_test.go +++ b/terraform/graph_test.go @@ -68,37 +68,3 @@ func testGraphHappensBefore(t *testing.T, g *Graph, A, B string) { "Expected %q before %q in:\n\n%s", A, B, g.String()) } - -type testGraphSubPath struct { - PathFn func() []string -} - -func (v *testGraphSubPath) Path() []string { return v.PathFn() } - -type testGraphDependable struct { - VertexName string - DependentOnMock []string -} - -func (v *testGraphDependable) Name() string { - return v.VertexName -} - -func (v *testGraphDependable) DependableName() []string { - return []string{v.VertexName} -} - -func (v *testGraphDependable) DependentOn() []string { - return v.DependentOnMock -} - -const testGraphAddStr = ` -42 -84 -` - -const testGraphConnectDepsStr = ` -a -b - a -` diff --git a/terraform/graph_walk_context.go b/terraform/graph_walk_context.go index c7492ed06..70cf4aff4 100644 --- a/terraform/graph_walk_context.go +++ b/terraform/graph_walk_context.go @@ -35,7 +35,6 @@ type ContextGraphWalker struct { // is in progress. NonFatalDiagnostics tfdiags.Diagnostics - errorLock sync.Mutex once sync.Once contexts map[string]*BuiltinEvalContext contextLock sync.Mutex @@ -123,39 +122,7 @@ func (w *ContextGraphWalker) init() { func (w *ContextGraphWalker) Execute(ctx EvalContext, n GraphNodeExecutable) tfdiags.Diagnostics { // Acquire a lock on the semaphore w.Context.parallelSem.Acquire() + defer w.Context.parallelSem.Release() - err := n.Execute(ctx, w.Operation) - - // Release the semaphore - w.Context.parallelSem.Release() - - if err == nil { - return nil - } - - // Acquire the lock because anything is going to require a lock. 
- w.errorLock.Lock() - defer w.errorLock.Unlock() - - // If the error is non-fatal then we'll accumulate its diagnostics in our - // non-fatal list, rather than returning it directly, so that the graph - // walk can continue. - if nferr, ok := err.(tfdiags.NonFatalError); ok { - w.NonFatalDiagnostics = w.NonFatalDiagnostics.Append(nferr.Diagnostics) - return nil - } - - // If we early exit, it isn't an error. - if _, isEarlyExit := err.(EvalEarlyExitError); isEarlyExit { - return nil - } - - // Otherwise, we'll let our usual diagnostics machinery figure out how to - // unpack this as one or more diagnostic messages and return that. If we - // get down here then the returned diagnostics will contain at least one - // error, causing the graph walk to halt. - var diags tfdiags.Diagnostics - diags = diags.Append(err) - return diags - + return n.Execute(ctx, w.Operation) } diff --git a/terraform/hook.go b/terraform/hook.go index c0bb23ab2..1887c236a 100644 --- a/terraform/hook.go +++ b/terraform/hook.go @@ -143,19 +143,3 @@ func (*NilHook) PostImportState(addr addrs.AbsResourceInstance, imported []provi func (*NilHook) PostStateUpdate(new *states.State) (HookAction, error) { return HookActionContinue, nil } - -// handleHook turns hook actions into panics. This lets you use the -// panic/recover mechanism in Go as a flow control mechanism for hook -// actions. 
-func handleHook(a HookAction, err error) { - if err != nil { - // TODO: handle errors - } - - switch a { - case HookActionContinue: - return - case HookActionHalt: - panic(HookActionHalt) - } -} diff --git a/terraform/hook_stop.go b/terraform/hook_stop.go index 811fb337c..86f221142 100644 --- a/terraform/hook_stop.go +++ b/terraform/hook_stop.go @@ -1,6 +1,7 @@ package terraform import ( + "errors" "sync/atomic" "github.com/zclconf/go-cty/cty" @@ -76,11 +77,7 @@ func (h *stopHook) PostStateUpdate(new *states.State) (HookAction, error) { func (h *stopHook) hook() (HookAction, error) { if h.Stopped() { - // FIXME: This should really return an error since stopping partway - // through is not a successful run-to-completion, but we'll need to - // introduce that cautiously since existing automation solutions may - // be depending on this behavior. - return HookActionHalt, nil + return HookActionHalt, errors.New("execution halted") } return HookActionContinue, nil diff --git a/terraform/marks.go b/terraform/marks.go new file mode 100644 index 000000000..8e2a32607 --- /dev/null +++ b/terraform/marks.go @@ -0,0 +1,39 @@ +package terraform + +import ( + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" +) + +// marksEqual compares 2 unordered sets of PathValue marks for equality, with +// the comparison using the cty.PathValueMarks.Equal method. 
+func marksEqual(a, b []cty.PathValueMarks) bool { + if len(a) == 0 && len(b) == 0 { + return true + } + + if len(a) != len(b) { + return false + } + + less := func(s []cty.PathValueMarks) func(i, j int) bool { + return func(i, j int) bool { + // the sort only needs to be consistent, so use the GoString format + // to get a comparable value + return fmt.Sprintf("%#v", s[i]) < fmt.Sprintf("%#v", s[j]) + } + } + + sort.Slice(a, less(a)) + sort.Slice(b, less(b)) + + for i := 0; i < len(a); i++ { + if !a[i].Equal(b[i]) { + return false + } + } + + return true +} diff --git a/terraform/marks_test.go b/terraform/marks_test.go new file mode 100644 index 000000000..efb3b7e9b --- /dev/null +++ b/terraform/marks_test.go @@ -0,0 +1,104 @@ +package terraform + +import ( + "fmt" + "testing" + + "github.com/zclconf/go-cty/cty" +) + +func TestMarksEqual(t *testing.T) { + for i, tc := range []struct { + a, b []cty.PathValueMarks + equal bool + }{ + { + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks("sensitive")}, + }, + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks("sensitive")}, + }, + true, + }, + { + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks("sensitive")}, + }, + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "A"}}, Marks: cty.NewValueMarks("sensitive")}, + }, + false, + }, + { + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks("sensitive")}, + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "b"}}, Marks: cty.NewValueMarks("sensitive")}, + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "c"}}, Marks: cty.NewValueMarks("sensitive")}, + }, + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "b"}}, Marks: cty.NewValueMarks("sensitive")}, + 
cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "c"}}, Marks: cty.NewValueMarks("sensitive")}, + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks("sensitive")}, + }, + true, + }, + { + []cty.PathValueMarks{ + cty.PathValueMarks{ + Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "b"}}, + Marks: cty.NewValueMarks("sensitive"), + }, + cty.PathValueMarks{ + Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "c"}}, + Marks: cty.NewValueMarks("sensitive"), + }, + }, + []cty.PathValueMarks{ + cty.PathValueMarks{ + Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "c"}}, + Marks: cty.NewValueMarks("sensitive"), + }, + cty.PathValueMarks{ + Path: cty.Path{cty.GetAttrStep{Name: "a"}, cty.GetAttrStep{Name: "b"}}, + Marks: cty.NewValueMarks("sensitive"), + }, + }, + true, + }, + { + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks("sensitive")}, + }, + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "b"}}, Marks: cty.NewValueMarks("sensitive")}, + }, + false, + }, + { + nil, + nil, + true, + }, + { + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks("sensitive")}, + }, + nil, + false, + }, + { + nil, + []cty.PathValueMarks{ + cty.PathValueMarks{Path: cty.Path{cty.GetAttrStep{Name: "a"}}, Marks: cty.NewValueMarks("sensitive")}, + }, + false, + }, + } { + t.Run(fmt.Sprint(i), func(t *testing.T) { + if marksEqual(tc.a, tc.b) != tc.equal { + t.Fatalf("marksEqual(\n%#v,\n%#v,\n) != %t\n", tc.a, tc.b, tc.equal) + } + }) + } +} diff --git a/terraform/node_count_boundary.go b/terraform/node_count_boundary.go index bfdbd1efa..2e972ff21 100644 --- a/terraform/node_count_boundary.go +++ b/terraform/node_count_boundary.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs" + 
"github.com/hashicorp/terraform/tfdiags" ) // NodeCountBoundary fixes up any transitions between "each modes" in objects @@ -14,12 +15,14 @@ type NodeCountBoundary struct { Config *configs.Config } +var _ GraphNodeExecutable = (*NodeCountBoundary)(nil) + func (n *NodeCountBoundary) Name() string { return "meta.count-boundary (EachMode fixup)" } // GraphNodeExecutable -func (n *NodeCountBoundary) Execute(ctx EvalContext, op walkOperation) error { +func (n *NodeCountBoundary) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { // We'll temporarily lock the state to grab the modules, then work on each // one separately while taking a lock again for each separate resource. // This means that if another caller concurrently adds a module here while @@ -42,10 +45,11 @@ func (n *NodeCountBoundary) Execute(ctx EvalContext, op walkOperation) error { continue } if err := n.fixModule(ctx, addr); err != nil { - return err + diags = diags.Append(err) + return diags } } - return nil + return diags } func (n *NodeCountBoundary) fixModule(ctx EvalContext, moduleAddr addrs.ModuleInstance) error { diff --git a/terraform/node_count_boundary_test.go b/terraform/node_count_boundary_test.go index 13c5fac29..497d4fc96 100644 --- a/terraform/node_count_boundary_test.go +++ b/terraform/node_count_boundary_test.go @@ -53,9 +53,9 @@ func TestNodeCountBoundaryExecute(t *testing.T) { } node := NodeCountBoundary{Config: config} - err := node.Execute(ctx, walkApply) - if err != nil { - t.Fatalf("unexpected error: %s", err.Error()) + diags := node.Execute(ctx, walkApply) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) } if !state.HasResources() { t.Fatal("resources missing from state") diff --git a/terraform/node_data_destroy.go b/terraform/node_data_destroy.go index 731371920..14c06516b 100644 --- a/terraform/node_data_destroy.go +++ b/terraform/node_data_destroy.go @@ -1,6 +1,10 @@ package terraform -import "log" +import ( + "log" + + 
"github.com/hashicorp/terraform/tfdiags" +) // NodeDestroyableDataResourceInstance represents a resource that is "destroyable": // it is ready to be destroyed. @@ -13,7 +17,7 @@ var ( ) // GraphNodeExecutable -func (n *NodeDestroyableDataResourceInstance) Execute(ctx EvalContext, op walkOperation) error { +func (n *NodeDestroyableDataResourceInstance) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { log.Printf("[TRACE] NodeDestroyableDataResourceInstance: removing state object for %s", n.Addr) ctx.State().SetResourceInstanceCurrent(n.Addr, nil, n.ResolvedProvider) return nil diff --git a/terraform/node_data_destroy_test.go b/terraform/node_data_destroy_test.go index 24fb988dd..32d07b143 100644 --- a/terraform/node_data_destroy_test.go +++ b/terraform/node_data_destroy_test.go @@ -36,9 +36,9 @@ func TestNodeDataDestroyExecute(t *testing.T) { }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), }} - err := node.Execute(ctx, walkApply) - if err != nil { - t.Fatalf("unexpected error: %s", err.Error()) + diags := node.Execute(ctx, walkApply) + if diags.HasErrors() { + t.Fatalf("unexpected error: %v", diags.Err()) } // verify resource removed from state diff --git a/terraform/node_local.go b/terraform/node_local.go index 055492fe8..eb04b3954 100644 --- a/terraform/node_local.go +++ b/terraform/node_local.go @@ -128,10 +128,7 @@ func (n *NodeLocal) References() []*addrs.Reference { // NodeLocal.Execute is an Execute implementation that evaluates the // expression for a local value and writes it into a transient part of // the state. 
-func (n *NodeLocal) Execute(ctx EvalContext, op walkOperation) error { - - var diags tfdiags.Diagnostics - +func (n *NodeLocal) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { expr := n.Config.Expr addr := n.Addr.LocalValue @@ -150,23 +147,24 @@ func (n *NodeLocal) Execute(ctx EvalContext, op walkOperation) error { } } if diags.HasErrors() { - return diags.Err() + return diags } val, moreDiags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil) diags = diags.Append(moreDiags) if moreDiags.HasErrors() { - return diags.Err() + return diags } state := ctx.State() if state == nil { - return fmt.Errorf("cannot write local value to nil state") + diags = diags.Append(fmt.Errorf("cannot write local value to nil state")) + return diags } state.SetLocalValue(addr.Absolute(ctx.Path()), val) - return nil + return diags } // dag.GraphNodeDotter impl. diff --git a/terraform/node_module_expand.go b/terraform/node_module_expand.go index 1e27fd274..07ff4545c 100644 --- a/terraform/node_module_expand.go +++ b/terraform/node_module_expand.go @@ -99,7 +99,7 @@ func (n *nodeExpandModule) ReferenceOutside() (selfPath, referencePath addrs.Mod } // GraphNodeExecutable -func (n *nodeExpandModule) Execute(ctx EvalContext, op walkOperation) error { +func (n *nodeExpandModule) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { expander := ctx.InstanceExpander() _, call := n.Addr.Call() @@ -110,16 +110,18 @@ func (n *nodeExpandModule) Execute(ctx EvalContext, op walkOperation) error { ctx = ctx.WithPath(module) switch { case n.ModuleCall.Count != nil: - count, diags := evaluateCountExpression(n.ModuleCall.Count, ctx) + count, ctDiags := evaluateCountExpression(n.ModuleCall.Count, ctx) + diags = diags.Append(ctDiags) if diags.HasErrors() { - return diags.Err() + return diags } expander.SetModuleCount(module, call, count) case n.ModuleCall.ForEach != nil: - forEach, diags := evaluateForEachExpression(n.ModuleCall.ForEach, ctx) + forEach, feDiags 
:= evaluateForEachExpression(n.ModuleCall.ForEach, ctx) + diags = diags.Append(feDiags) if diags.HasErrors() { - return diags.Err() + return diags } expander.SetModuleForEach(module, call, forEach) @@ -128,7 +130,7 @@ func (n *nodeExpandModule) Execute(ctx EvalContext, op walkOperation) error { } } - return nil + return diags } @@ -139,6 +141,8 @@ func (n *nodeExpandModule) Execute(ctx EvalContext, op walkOperation) error { // Besides providing a root node for dependency ordering, nodeCloseModule also // cleans up state after all the module nodes have been evaluated, removing // empty resources and modules from the state. +// The root module instance also closes any remaining provisioner plugins which +// do not have a lifecycle controlled by individual graph nodes. type nodeCloseModule struct { Addr addrs.Module } @@ -146,6 +150,7 @@ type nodeCloseModule struct { var ( _ GraphNodeReferenceable = (*nodeCloseModule)(nil) _ GraphNodeReferenceOutside = (*nodeCloseModule)(nil) + _ GraphNodeExecutable = (*nodeCloseModule)(nil) ) func (n *nodeCloseModule) ModulePath() addrs.Module { @@ -170,7 +175,13 @@ func (n *nodeCloseModule) Name() string { return n.Addr.String() + " (close)" } -func (n *nodeCloseModule) Execute(ctx EvalContext, op walkOperation) error { +func (n *nodeCloseModule) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { + if n.Addr.IsRoot() { + // If this is the root module, we are cleaning up the walk, so close + // any running provisioners + diags = diags.Append(ctx.CloseProvisioners()) + } + switch op { case walkApply, walkDestroy: state := ctx.State().Lock() @@ -206,10 +217,11 @@ type nodeValidateModule struct { nodeExpandModule } +var _ GraphNodeExecutable = (*nodeValidateModule)(nil) + // GraphNodeEvalable -func (n *nodeValidateModule) Execute(ctx EvalContext, op walkOperation) error { +func (n *nodeValidateModule) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { _, call := n.Addr.Call() - var diags 
tfdiags.Diagnostics expander := ctx.InstanceExpander() // Modules all evaluate to single instances during validation, only to @@ -228,7 +240,7 @@ func (n *nodeValidateModule) Execute(ctx EvalContext, op walkOperation) error { diags = diags.Append(countDiags) case n.ModuleCall.ForEach != nil: - _, forEachDiags := evaluateForEachExpressionValue(n.ModuleCall.ForEach, ctx) + _, forEachDiags := evaluateForEachExpressionValue(n.ModuleCall.ForEach, ctx, true) diags = diags.Append(forEachDiags) } @@ -238,9 +250,5 @@ func (n *nodeValidateModule) Execute(ctx EvalContext, op walkOperation) error { expander.SetModuleSingle(module, call) } - if diags.HasErrors() { - return diags.ErrWithWarnings() - } - - return nil + return diags } diff --git a/terraform/node_module_expand_test.go b/terraform/node_module_expand_test.go index 578b4276f..c83d16934 100644 --- a/terraform/node_module_expand_test.go +++ b/terraform/node_module_expand_test.go @@ -42,9 +42,9 @@ func TestNodeCloseModuleExecute(t *testing.T) { StateState: state.SyncWrapper(), } node := nodeCloseModule{addrs.Module{"child"}} - err := node.Execute(ctx, walkApply) - if err != nil { - t.Fatalf("unexpected error: %s", err.Error()) + diags := node.Execute(ctx, walkApply) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) } // Since module.child has no resources, it should be removed @@ -62,9 +62,9 @@ func TestNodeCloseModuleExecute(t *testing.T) { } node := nodeCloseModule{addrs.Module{"child"}} - err := node.Execute(ctx, walkImport) - if err != nil { - t.Fatalf("unexpected error: %s", err.Error()) + diags := node.Execute(ctx, walkImport) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) } if _, ok := state.Modules["module.child"]; !ok { t.Fatal("module.child was removed from state, expected no-op") @@ -87,9 +87,9 @@ func TestNodeValidateModuleExecute(t *testing.T) { }, } - err := node.Execute(ctx, walkApply) - if err != nil { - t.Fatalf("unexpected error: %s", err.Error()) + diags 
:= node.Execute(ctx, walkApply) + if diags.HasErrors() { + t.Fatalf("unexpected error: %v", diags.Err()) } }) diff --git a/terraform/node_module_variable.go b/terraform/node_module_variable.go index 78ddb6e8f..675242d76 100644 --- a/terraform/node_module_variable.go +++ b/terraform/node_module_variable.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform/dag" "github.com/hashicorp/terraform/instances" "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/convert" ) @@ -141,7 +142,7 @@ func (n *nodeModuleVariable) ModulePath() addrs.Module { } // GraphNodeExecutable -func (n *nodeModuleVariable) Execute(ctx EvalContext, op walkOperation) error { +func (n *nodeModuleVariable) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { // If we have no value, do nothing if n.Expr == nil { return nil @@ -154,14 +155,16 @@ func (n *nodeModuleVariable) Execute(ctx EvalContext, op walkOperation) error { switch op { case walkValidate: - vals, err = n.EvalModuleCallArgument(ctx, true) - if err != nil { - return err + vals, err = n.evalModuleCallArgument(ctx, true) + diags = diags.Append(err) + if diags.HasErrors() { + return diags } default: - vals, err = n.EvalModuleCallArgument(ctx, false) - if err != nil { - return err + vals, err = n.evalModuleCallArgument(ctx, false) + diags = diags.Append(err) + if diags.HasErrors() { + return diags } } @@ -184,7 +187,7 @@ func (n *nodeModuleVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNod } } -// EvalModuleCallArgument produces the value for a particular variable as will +// evalModuleCallArgument produces the value for a particular variable as will // be used by a child module instance. 
// // The result is written into a map, with its key set to the local name of the @@ -196,7 +199,7 @@ func (n *nodeModuleVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNod // validateOnly indicates that this evaluation is only for config // validation, and we will not have any expansion module instance // repetition data. -func (n *nodeModuleVariable) EvalModuleCallArgument(ctx EvalContext, validateOnly bool) (map[string]cty.Value, error) { +func (n *nodeModuleVariable) evalModuleCallArgument(ctx EvalContext, validateOnly bool) (map[string]cty.Value, error) { wantType := n.Config.Type name := n.Addr.Variable.Name expr := n.Expr diff --git a/terraform/node_output.go b/terraform/node_output.go index 00f39c80e..c70c76e9c 100644 --- a/terraform/node_output.go +++ b/terraform/node_output.go @@ -246,11 +246,10 @@ func (n *NodeApplyableOutput) References() []*addrs.Reference { } // GraphNodeExecutable -func (n *NodeApplyableOutput) Execute(ctx EvalContext, op walkOperation) error { - var diags tfdiags.Diagnostics +func (n *NodeApplyableOutput) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { state := ctx.State() if state == nil { - return nil + return } changes := ctx.Changes() // may be nil, if we're not working on a changeset @@ -297,9 +296,24 @@ func (n *NodeApplyableOutput) Execute(ctx EvalContext, op walkOperation) error { // marked as unknown. If the evaluator was able to find a type // for the value in spite of the error then we'll use it. n.setValue(state, changes, cty.UnknownVal(val.Type())) - return EvalEarlyExitError{} + + // Keep existing warnings, while converting errors to warnings. + // This is not meant to be the normal path, so there no need to + // make the errors pretty. 
+ var warnings tfdiags.Diagnostics + for _, d := range diags { + switch d.Severity() { + case tfdiags.Warning: + warnings = warnings.Append(d) + case tfdiags.Error: + desc := d.Description() + warnings = warnings.Append(tfdiags.SimpleWarning(fmt.Sprintf("%s:%s", desc.Summary, desc.Detail))) + } + } + + return warnings } - return diags.Err() + return diags } n.setValue(state, changes, val) @@ -309,7 +323,7 @@ func (n *NodeApplyableOutput) Execute(ctx EvalContext, op walkOperation) error { n.setValue(state, changes, val) } - return nil + return diags } // dag.GraphNodeDotter impl. @@ -350,7 +364,7 @@ func (n *NodeDestroyableOutput) temporaryValue() bool { } // GraphNodeExecutable -func (n *NodeDestroyableOutput) Execute(ctx EvalContext, op walkOperation) error { +func (n *NodeDestroyableOutput) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { state := ctx.State() if state == nil { return nil @@ -418,12 +432,17 @@ func (n *NodeApplyableOutput) setValue(state *states.SyncState, changes *plans.C // the diff sensitiveBefore := false before := cty.NullVal(cty.DynamicPseudoType) + + // is this output new to our state? + newOutput := true + mod := state.Module(n.Addr.Module) if n.Addr.Module.IsRoot() && mod != nil { for name, o := range mod.OutputValues { if name == n.Addr.OutputValue.Name { before = o.Value sensitiveBefore = o.Sensitive + newOutput = false break } } @@ -437,12 +456,15 @@ func (n *NodeApplyableOutput) setValue(state *states.SyncState, changes *plans.C // strip any marks here just to be sure we don't panic on the True comparison val, _ = val.UnmarkDeep() - var action plans.Action + action := plans.Update switch { - case val.IsNull(): - action = plans.Delete + case val.IsNull() && before.IsNull(): + // This is separate from the NoOp case below, since we can ignore + // sensitivity here when there are only null values. 
+ action = plans.NoOp - case before.IsNull(): + case newOutput: + // This output was just added to the configuration action = plans.Create case val.IsWhollyKnown() && @@ -453,9 +475,6 @@ func (n *NodeApplyableOutput) setValue(state *states.SyncState, changes *plans.C // only one we can act on, and the state will have been loaded // without any marks to consider. action = plans.NoOp - - default: - action = plans.Update } change := &plans.OutputChange{ @@ -473,7 +492,7 @@ func (n *NodeApplyableOutput) setValue(state *states.SyncState, changes *plans.C // Should never happen, since we just constructed this right above panic(fmt.Sprintf("planned change for %s could not be encoded: %s", n.Addr, err)) } - log.Printf("[TRACE] ExecuteWriteOutput: Saving %s change for %s in changeset", change.Action, n.Addr) + log.Printf("[TRACE] setValue: Saving %s change for %s in changeset", change.Action, n.Addr) changes.RemoveOutputChange(n.Addr) // remove any existing planned change, if present changes.AppendOutputChange(cs) // add the new planned change } @@ -482,12 +501,12 @@ func (n *NodeApplyableOutput) setValue(state *states.SyncState, changes *plans.C // The state itself doesn't represent unknown values, so we null them // out here and then we'll save the real unknown value in the planned // changeset below, if we have one on this graph walk. 
- log.Printf("[TRACE] EvalWriteOutput: Saving value for %s in state", n.Addr) + log.Printf("[TRACE] setValue: Saving value for %s in state", n.Addr) unmarkedVal, _ := val.UnmarkDeep() stateVal := cty.UnknownAsNull(unmarkedVal) state.SetOutputValue(n.Addr, stateVal, n.Config.Sensitive) } else { - log.Printf("[TRACE] EvalWriteOutput: Removing %s from state (it is now null)", n.Addr) + log.Printf("[TRACE] setValue: Removing %s from state (it is now null)", n.Addr) state.RemoveOutputValue(n.Addr) } diff --git a/terraform/node_output_test.go b/terraform/node_output_test.go index 63fd35214..3b518f046 100644 --- a/terraform/node_output_test.go +++ b/terraform/node_output_test.go @@ -81,11 +81,11 @@ func TestNodeApplyableOutputExecute_invalidDependsOn(t *testing.T) { }) ctx.EvaluateExprResult = val - err := node.Execute(ctx, walkApply) - if err == nil { + diags := node.Execute(ctx, walkApply) + if !diags.HasErrors() { t.Fatal("expected execute error, but there was none") } - if got, want := err.Error(), "Invalid depends_on reference"; !strings.Contains(got, want) { + if got, want := diags.Err().Error(), "Invalid depends_on reference"; !strings.Contains(got, want) { t.Errorf("expected error to include %q, but was: %s", want, got) } } @@ -102,11 +102,11 @@ func TestNodeApplyableOutputExecute_sensitiveValueNotOutput(t *testing.T) { }) ctx.EvaluateExprResult = val - err := node.Execute(ctx, walkApply) - if err == nil { + diags := node.Execute(ctx, walkApply) + if !diags.HasErrors() { t.Fatal("expected execute error, but there was none") } - if got, want := err.Error(), "Output refers to sensitive values"; !strings.Contains(got, want) { + if got, want := diags.Err().Error(), "Output refers to sensitive values"; !strings.Contains(got, want) { t.Errorf("expected error to include %q, but was: %s", want, got) } } @@ -151,9 +151,9 @@ func TestNodeDestroyableOutputExecute(t *testing.T) { } node := NodeDestroyableOutput{Addr: outputAddr} - err := node.Execute(ctx, walkApply) - if err 
!= nil { - t.Fatalf("Unexpected error: %s", err.Error()) + diags := node.Execute(ctx, walkApply) + if diags.HasErrors() { + t.Fatalf("Unexpected error: %s", diags.Err()) } if state.OutputValue(outputAddr) != nil { t.Fatal("Unexpected outputs in state after removal") diff --git a/terraform/node_provider.go b/terraform/node_provider.go index c901eb417..0ea431493 100644 --- a/terraform/node_provider.go +++ b/terraform/node_provider.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/providers" "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" ) // NodeApplyableProvider represents a provider during an apply. @@ -20,36 +21,45 @@ var ( ) // GraphNodeExecutable -func (n *NodeApplyableProvider) Execute(ctx EvalContext, op walkOperation) error { +func (n *NodeApplyableProvider) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { _, err := ctx.InitProvider(n.Addr) - if err != nil { - return err + diags = diags.Append(err) + if diags.HasErrors() { + return diags } - provider, _, err := GetProvider(ctx, n.Addr) - if err != nil { - return err + provider, _, err := getProvider(ctx, n.Addr) + diags = diags.Append(err) + if diags.HasErrors() { + return diags } switch op { case walkValidate: - return n.ValidateProvider(ctx, provider) + return diags.Append(n.ValidateProvider(ctx, provider)) case walkPlan, walkApply, walkDestroy: - return n.ConfigureProvider(ctx, provider, false) + return diags.Append(n.ConfigureProvider(ctx, provider, false)) case walkImport: - return n.ConfigureProvider(ctx, provider, true) + return diags.Append(n.ConfigureProvider(ctx, provider, true)) } - return nil + return diags } -func (n *NodeApplyableProvider) ValidateProvider(ctx EvalContext, provider providers.Interface) error { - var diags tfdiags.Diagnostics +func (n *NodeApplyableProvider) ValidateProvider(ctx EvalContext, provider providers.Interface) (diags tfdiags.Diagnostics) { configBody := 
buildProviderConfig(ctx, n.Addr, n.ProviderConfig()) + // if a provider config is empty (only an alias), return early and don't continue + // validation. validate doesn't need to fully configure the provider itself, so + // skipping a provider with an implied configuration won't prevent other validation from completing. + _, noConfigDiags := configBody.Content(&hcl.BodySchema{}) + if !noConfigDiags.HasErrors() { + return nil + } + resp := provider.GetSchema() diags = diags.Append(resp.Diagnostics) if diags.HasErrors() { - return diags.ErrWithWarnings() + return diags } configSchema := resp.Provider.Block @@ -60,27 +70,30 @@ func (n *NodeApplyableProvider) ValidateProvider(ctx EvalContext, provider provi configSchema = &configschema.Block{} } - configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) - diags = diags.Append(evalDiags) + configVal, _, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) if evalDiags.HasErrors() { - return diags.ErrWithWarnings() + return diags.Append(evalDiags) } + diags = diags.Append(evalDiags) + + // If our config value contains any marked values, ensure those are + // stripped out before sending this to the provider + unmarkedConfigVal, _ := configVal.UnmarkDeep() req := providers.PrepareProviderConfigRequest{ - Config: configVal, + Config: unmarkedConfigVal, } validateResp := provider.PrepareProviderConfig(req) diags = diags.Append(validateResp.Diagnostics) - return diags.ErrWithWarnings() + return diags } // ConfigureProvider configures a provider that is already initialized and retrieved. // If verifyConfigIsKnown is true, ConfigureProvider will return an error if the // provider configVal is not wholly known and is meant only for use during import. 
-func (n *NodeApplyableProvider) ConfigureProvider(ctx EvalContext, provider providers.Interface, verifyConfigIsKnown bool) error { - var diags tfdiags.Diagnostics +func (n *NodeApplyableProvider) ConfigureProvider(ctx EvalContext, provider providers.Interface, verifyConfigIsKnown bool) (diags tfdiags.Diagnostics) { config := n.ProviderConfig() configBody := buildProviderConfig(ctx, n.Addr, config) @@ -88,14 +101,14 @@ func (n *NodeApplyableProvider) ConfigureProvider(ctx EvalContext, provider prov resp := provider.GetSchema() diags = diags.Append(resp.Diagnostics) if diags.HasErrors() { - return diags.ErrWithWarnings() + return diags } configSchema := resp.Provider.Block configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) diags = diags.Append(evalDiags) if evalDiags.HasErrors() { - return diags.ErrWithWarnings() + return diags } if verifyConfigIsKnown && !configVal.IsWhollyKnown() { @@ -105,11 +118,68 @@ func (n *NodeApplyableProvider) ConfigureProvider(ctx EvalContext, provider prov Detail: fmt.Sprintf("The configuration for %s depends on values that cannot be determined until apply.", n.Addr), Subject: &config.DeclRange, }) - return diags.ErrWithWarnings() + return diags } - configDiags := ctx.ConfigureProvider(n.Addr, configVal) - configDiags = configDiags.InConfigBody(configBody) + // If our config value contains any marked values, ensure those are + // stripped out before sending this to the provider + unmarkedConfigVal, _ := configVal.UnmarkDeep() - return configDiags.ErrWithWarnings() + // Allow the provider to validate and insert any defaults into the full + // configuration. + req := providers.PrepareProviderConfigRequest{ + Config: unmarkedConfigVal, + } + + // PrepareProviderConfig is only used for validation. We are intentionally + // ignoring the PreparedConfig field to maintain existing behavior. 
+ prepareResp := provider.PrepareProviderConfig(req) + if prepareResp.Diagnostics.HasErrors() { + if config == nil { + // If there isn't an explicit "provider" block in the configuration, + // this error message won't be very clear. Add some detail to the + // error message in this case. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider configuration", + fmt.Sprintf(providerConfigErr, prepareResp.Diagnostics.Err(), n.Addr.Provider), + )) + return diags + } else { + return diags.Append(prepareResp.Diagnostics) + } + } + diags = diags.Append(prepareResp.Diagnostics) + + // If the provider returns something different, log a warning to help + // indicate to provider developers that the value is not used. + preparedCfg := prepareResp.PreparedConfig + if preparedCfg != cty.NilVal && !preparedCfg.IsNull() && !preparedCfg.RawEquals(unmarkedConfigVal) { + log.Printf("[WARN] PrepareProviderConfig from %q changed the config value, but that value is unused", n.Addr) + } + + configDiags := ctx.ConfigureProvider(n.Addr, unmarkedConfigVal) + if configDiags.HasErrors() { + if config == nil { + // If there isn't an explicit "provider" block in the configuration, + // this error message won't be very clear. Add some detail to the + // error message in this case. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider configuration", + fmt.Sprintf(providerConfigErr, configDiags.InConfigBody(configBody).Err(), n.Addr.Provider), + )) + return diags + } else { + return diags.Append(configDiags.InConfigBody(configBody)) + } + } + diags = diags.Append(configDiags.InConfigBody(configBody)) + + return diags } + +const providerConfigErr = `%s + +Provider %q requires explicit configuration. Add a provider block to the root module and configure the provider's required arguments as described in the provider documentation. 
+` diff --git a/terraform/node_provider_eval.go b/terraform/node_provider_eval.go index aa6ba2c58..a89583cff 100644 --- a/terraform/node_provider_eval.go +++ b/terraform/node_provider_eval.go @@ -1,5 +1,7 @@ package terraform +import "github.com/hashicorp/terraform/tfdiags" + // NodeEvalableProvider represents a provider during an "eval" walk. // This special provider node type just initializes a provider and // fetches its schema, without configuring it or otherwise interacting @@ -8,8 +10,10 @@ type NodeEvalableProvider struct { *NodeAbstractProvider } +var _ GraphNodeExecutable = (*NodeEvalableProvider)(nil) + // GraphNodeExecutable -func (n *NodeEvalableProvider) Execute(ctx EvalContext, op walkOperation) error { +func (n *NodeEvalableProvider) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { _, err := ctx.InitProvider(n.Addr) - return err + return diags.Append(err) } diff --git a/terraform/node_provider_test.go b/terraform/node_provider_test.go index 5c418bde9..5b4297938 100644 --- a/terraform/node_provider_test.go +++ b/terraform/node_provider_test.go @@ -1,10 +1,16 @@ package terraform import ( + "fmt" + "strings" "testing" + "github.com/hashicorp/hcl/v2" "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) @@ -65,13 +71,13 @@ func TestNodeApplyableProviderExecute_unknownImport(t *testing.T) { ctx := &MockEvalContext{ProviderProvider: provider} ctx.installSimpleEval() - err := n.Execute(ctx, walkImport) - if err == nil { + diags := n.Execute(ctx, walkImport) + if !diags.HasErrors() { t.Fatal("expected error, got success") } detail := `Invalid provider configuration: The configuration for provider["registry.terraform.io/hashicorp/foo"] depends on values that cannot be determined until apply.` - if got, want := err.Error(), detail; got 
!= want { + if got, want := diags.Err().Error(), detail; got != want { t.Errorf("wrong diagnostic detail\n got: %q\nwant: %q", got, want) } @@ -115,3 +121,352 @@ func TestNodeApplyableProviderExecute_unknownApply(t *testing.T) { t.Errorf("wrong configuration value\ngot: %#v\nwant: %#v", got, want) } } + +func TestNodeApplyableProviderExecute_sensitive(t *testing.T) { + config := &configs.Provider{ + Name: "foo", + Config: configs.SynthBody("", map[string]cty.Value{ + "test_string": cty.StringVal("hello").Mark("sensitive"), + }), + } + provider := mockProviderWithConfigSchema(simpleTestSchema()) + providerAddr := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("foo"), + } + + n := &NodeApplyableProvider{&NodeAbstractProvider{ + Addr: providerAddr, + Config: config, + }} + + ctx := &MockEvalContext{ProviderProvider: provider} + ctx.installSimpleEval() + if err := n.Execute(ctx, walkApply); err != nil { + t.Fatalf("err: %s", err) + } + + if !ctx.ConfigureProviderCalled { + t.Fatal("should be called") + } + + gotObj := ctx.ConfigureProviderConfig + if !gotObj.Type().HasAttribute("test_string") { + t.Fatal("configuration object does not have \"test_string\" attribute") + } + if got, want := gotObj.GetAttr("test_string"), cty.StringVal("hello"); !got.RawEquals(want) { + t.Errorf("wrong configuration value\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestNodeApplyableProviderExecute_sensitiveValidate(t *testing.T) { + config := &configs.Provider{ + Name: "foo", + Config: configs.SynthBody("", map[string]cty.Value{ + "test_string": cty.StringVal("hello").Mark("sensitive"), + }), + } + provider := mockProviderWithConfigSchema(simpleTestSchema()) + providerAddr := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("foo"), + } + + n := &NodeApplyableProvider{&NodeAbstractProvider{ + Addr: providerAddr, + Config: config, + }} + + ctx := &MockEvalContext{ProviderProvider: provider} + 
ctx.installSimpleEval() + if err := n.Execute(ctx, walkValidate); err != nil { + t.Fatalf("err: %s", err) + } + + if !provider.PrepareProviderConfigCalled { + t.Fatal("should be called") + } + + gotObj := provider.PrepareProviderConfigRequest.Config + if !gotObj.Type().HasAttribute("test_string") { + t.Fatal("configuration object does not have \"test_string\" attribute") + } + if got, want := gotObj.GetAttr("test_string"), cty.StringVal("hello"); !got.RawEquals(want) { + t.Errorf("wrong configuration value\ngot: %#v\nwant: %#v", got, want) + } +} + +func TestNodeApplyableProviderExecute_emptyValidate(t *testing.T) { + config := &configs.Provider{ + Name: "foo", + Config: configs.SynthBody("", map[string]cty.Value{}), + } + provider := mockProviderWithConfigSchema(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "test_string": { + Type: cty.String, + Required: true, + }, + }, + }) + providerAddr := addrs.AbsProviderConfig{ + Module: addrs.RootModule, + Provider: addrs.NewDefaultProvider("foo"), + } + + n := &NodeApplyableProvider{&NodeAbstractProvider{ + Addr: providerAddr, + Config: config, + }} + + ctx := &MockEvalContext{ProviderProvider: provider} + ctx.installSimpleEval() + if err := n.Execute(ctx, walkValidate); err != nil { + t.Fatalf("err: %s", err) + } + + if ctx.ConfigureProviderCalled { + t.Fatal("should not be called") + } +} + +func TestNodeApplyableProvider_Validate(t *testing.T) { + provider := mockProviderWithConfigSchema(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": { + Type: cty.String, + Required: true, + }, + }, + }) + ctx := &MockEvalContext{ProviderProvider: provider} + ctx.installSimpleEval() + + t.Run("valid", func(t *testing.T) { + config := &configs.Provider{ + Name: "test", + Config: configs.SynthBody("", map[string]cty.Value{ + "region": cty.StringVal("mars"), + }), + } + + node := NodeApplyableProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + Addr: 
mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + Config: config, + }, + } + + diags := node.ValidateProvider(ctx, provider) + if diags.HasErrors() { + t.Errorf("unexpected error with valid config: %s", diags.Err()) + } + }) + + t.Run("invalid", func(t *testing.T) { + config := &configs.Provider{ + Name: "test", + Config: configs.SynthBody("", map[string]cty.Value{ + "region": cty.MapValEmpty(cty.String), + }), + } + + node := NodeApplyableProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + Config: config, + }, + } + + diags := node.ValidateProvider(ctx, provider) + if !diags.HasErrors() { + t.Error("missing expected error with invalid config") + } + }) + + t.Run("empty config", func(t *testing.T) { + node := NodeApplyableProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + }, + } + + diags := node.ValidateProvider(ctx, provider) + if diags.HasErrors() { + t.Errorf("unexpected error with empty config: %s", diags.Err()) + } + }) +} + +//This test specifically tests responses from the +//providers.PrepareProviderConfigFn. See +//TestNodeApplyableProvider_ConfigProvider_config_fn_err for +//providers.ConfigureRequest responses. +func TestNodeApplyableProvider_ConfigProvider(t *testing.T) { + provider := mockProviderWithConfigSchema(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": { + Type: cty.String, + Optional: true, + }, + }, + }) + // For this test, we're returning an error for an optional argument. This + // can happen for example if an argument is only conditionally required. 
+ provider.PrepareProviderConfigFn = func(req providers.PrepareProviderConfigRequest) (resp providers.PrepareProviderConfigResponse) { + region := req.Config.GetAttr("region") + if region.IsNull() { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("value is not found")) + } + return + } + ctx := &MockEvalContext{ProviderProvider: provider} + ctx.installSimpleEval() + + t.Run("valid", func(t *testing.T) { + config := &configs.Provider{ + Name: "test", + Config: configs.SynthBody("", map[string]cty.Value{ + "region": cty.StringVal("mars"), + }), + } + + node := NodeApplyableProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + Config: config, + }, + } + + diags := node.ConfigureProvider(ctx, provider, false) + if diags.HasErrors() { + t.Errorf("unexpected error with valid config: %s", diags.Err()) + } + }) + + t.Run("missing required config (no config at all)", func(t *testing.T) { + node := NodeApplyableProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + }, + } + + diags := node.ConfigureProvider(ctx, provider, false) + if !diags.HasErrors() { + t.Fatal("missing expected error with nil config") + } + if !strings.Contains(diags.Err().Error(), "requires explicit configuration") { + t.Errorf("diagnostic is missing \"requires explicit configuration\" message: %s", diags.Err()) + } + }) + + t.Run("missing required config", func(t *testing.T) { + config := &configs.Provider{ + Name: "test", + Config: hcl.EmptyBody(), + } + node := NodeApplyableProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + Config: config, + }, + } + + diags := node.ConfigureProvider(ctx, provider, false) + if !diags.HasErrors() { + t.Fatal("missing expected error with invalid config") + } + if diags.Err().Error() != "value is not found" { + 
t.Errorf("wrong diagnostic: %s", diags.Err()) + } + }) + +} + +//This test is similar to TestNodeApplyableProvider_ConfigProvider, but tests responses from the providers.ConfigureRequest +func TestNodeApplyableProvider_ConfigProvider_config_fn_err(t *testing.T) { + provider := mockProviderWithConfigSchema(&configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "region": { + Type: cty.String, + Optional: true, + }, + }, + }) + ctx := &MockEvalContext{ProviderProvider: provider} + ctx.installSimpleEval() + // For this test, provider.PrepareConfigFn will succeed every time but the + // ctx.ConfigureProviderFn will return an error if a value is not found. + // + // This is an unlikely but real situation that occurs: + // https://github.com/hashicorp/terraform/issues/23087 + ctx.ConfigureProviderFn = func(addr addrs.AbsProviderConfig, cfg cty.Value) (diags tfdiags.Diagnostics) { + if cfg.IsNull() { + diags = diags.Append(fmt.Errorf("no config provided")) + } else { + region := cfg.GetAttr("region") + if region.IsNull() { + diags = diags.Append(fmt.Errorf("value is not found")) + } + } + return + } + + t.Run("valid", func(t *testing.T) { + config := &configs.Provider{ + Name: "test", + Config: configs.SynthBody("", map[string]cty.Value{ + "region": cty.StringVal("mars"), + }), + } + + node := NodeApplyableProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + Config: config, + }, + } + + diags := node.ConfigureProvider(ctx, provider, false) + if diags.HasErrors() { + t.Errorf("unexpected error with valid config: %s", diags.Err()) + } + }) + + t.Run("missing required config (no config at all)", func(t *testing.T) { + node := NodeApplyableProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + }, + } + + diags := node.ConfigureProvider(ctx, provider, false) + if !diags.HasErrors() { + 
t.Fatal("missing expected error with nil config") + } + if !strings.Contains(diags.Err().Error(), "requires explicit configuration") { + t.Errorf("diagnostic is missing \"requires explicit configuration\" message: %s", diags.Err()) + } + }) + + t.Run("missing required config", func(t *testing.T) { + config := &configs.Provider{ + Name: "test", + Config: hcl.EmptyBody(), + } + node := NodeApplyableProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + Addr: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + Config: config, + }, + } + + diags := node.ConfigureProvider(ctx, provider, false) + if !diags.HasErrors() { + t.Fatal("missing expected error with invalid config") + } + if diags.Err().Error() != "value is not found" { + t.Errorf("wrong diagnostic: %s", diags.Err()) + } + }) +} diff --git a/terraform/node_provisioner.go b/terraform/node_provisioner.go deleted file mode 100644 index c4df9c627..000000000 --- a/terraform/node_provisioner.go +++ /dev/null @@ -1,44 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/addrs" -) - -// NodeProvisioner represents a provider that has no associated operations. -// It registers all the common interfaces across operations for providers. -type NodeProvisioner struct { - NameValue string - PathValue addrs.ModuleInstance -} - -var ( - _ GraphNodeModuleInstance = (*NodeProvisioner)(nil) - _ GraphNodeProvisioner = (*NodeProvisioner)(nil) - _ GraphNodeExecutable = (*NodeProvisioner)(nil) -) - -func (n *NodeProvisioner) Name() string { - result := fmt.Sprintf("provisioner.%s", n.NameValue) - if len(n.PathValue) > 0 { - result = fmt.Sprintf("%s.%s", n.PathValue.String(), result) - } - - return result -} - -// GraphNodeModuleInstance -func (n *NodeProvisioner) Path() addrs.ModuleInstance { - return n.PathValue -} - -// GraphNodeProvisioner -func (n *NodeProvisioner) ProvisionerName() string { - return n.NameValue -} - -// GraphNodeExecutable impl. 
-func (n *NodeProvisioner) Execute(ctx EvalContext, op walkOperation) error { - return ctx.InitProvisioner(n.NameValue) -} diff --git a/terraform/node_resource_abstract.go b/terraform/node_resource_abstract.go index 4606d5185..e152a6cb6 100644 --- a/terraform/node_resource_abstract.go +++ b/terraform/node_resource_abstract.go @@ -305,8 +305,7 @@ func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotN // eval is the only change we get to set the resource "each mode" to list // in that case, allowing expression evaluation to see it as a zero-element list // rather than as not set at all. -func (n *NodeAbstractResource) writeResourceState(ctx EvalContext, addr addrs.AbsResource) error { - var diags tfdiags.Diagnostics +func (n *NodeAbstractResource) writeResourceState(ctx EvalContext, addr addrs.AbsResource) (diags tfdiags.Diagnostics) { state := ctx.State() // We'll record our expansion decision in the shared "expander" object @@ -320,7 +319,7 @@ func (n *NodeAbstractResource) writeResourceState(ctx EvalContext, addr addrs.Ab count, countDiags := evaluateCountExpression(n.Config.Count, ctx) diags = diags.Append(countDiags) if countDiags.HasErrors() { - return diags.Err() + return diags } state.SetResourceProvider(addr, n.ResolvedProvider) @@ -330,7 +329,7 @@ func (n *NodeAbstractResource) writeResourceState(ctx EvalContext, addr addrs.Ab forEach, forEachDiags := evaluateForEachExpression(n.Config.ForEach, ctx) diags = diags.Append(forEachDiags) if forEachDiags.HasErrors() { - return diags.Err() + return diags } // This method takes care of all of the business logic of updating this @@ -343,27 +342,23 @@ func (n *NodeAbstractResource) writeResourceState(ctx EvalContext, addr addrs.Ab expander.SetResourceSingle(addr.Module, n.Addr.Resource) } - return nil + return diags } -// ReadResourceInstanceState reads the current object for a specific instance in +// readResourceInstanceState reads the current object for a specific instance in // the 
state. -func (n *NodeAbstractResource) ReadResourceInstanceState(ctx EvalContext, addr addrs.AbsResourceInstance) (*states.ResourceInstanceObject, error) { - provider, providerSchema, err := GetProvider(ctx, n.ResolvedProvider) - - if provider == nil { - panic("ReadResourceInstanceState used with no Provider object") - } - if providerSchema == nil { - panic("ReadResourceInstanceState used with no ProviderSchema object") +func (n *NodeAbstractResource) readResourceInstanceState(ctx EvalContext, addr addrs.AbsResourceInstance) (*states.ResourceInstanceObject, error) { + provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + if err != nil { + return nil, err } - log.Printf("[TRACE] ReadResourceInstanceState: reading state for %s", addr) + log.Printf("[TRACE] readResourceInstanceState: reading state for %s", addr) src := ctx.State().ResourceInstanceObject(addr, states.CurrentGen) if src == nil { // Presumably we only have deposed objects, then. - log.Printf("[TRACE] ReadResourceInstanceState: no state present for %s", addr) + log.Printf("[TRACE] readResourceInstanceState: no state present for %s", addr) return nil, nil } @@ -373,7 +368,52 @@ func (n *NodeAbstractResource) ReadResourceInstanceState(ctx EvalContext, addr a return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", addr) } var diags tfdiags.Diagnostics - src, diags = UpgradeResourceState(addr, provider, src, schema, currentVersion) + src, diags = upgradeResourceState(addr, provider, src, schema, currentVersion) + if diags.HasErrors() { + // Note that we don't have any channel to return warnings here. We'll + // accept that for now since warnings during a schema upgrade would + // be pretty weird anyway, since this operation is supposed to seem + // invisible to the user. 
+ return nil, diags.Err() + } + + obj, err := src.Decode(schema.ImpliedType()) + if err != nil { + return nil, err + } + + return obj, nil +} + +// readResourceInstanceStateDeposed reads the deposed object for a specific +// instance in the state. +func (n *NodeAbstractResource) readResourceInstanceStateDeposed(ctx EvalContext, addr addrs.AbsResourceInstance, key states.DeposedKey) (*states.ResourceInstanceObject, error) { + provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + if err != nil { + return nil, err + } + + if key == states.NotDeposed { + return nil, fmt.Errorf("readResourceInstanceStateDeposed used with no instance key; this is a bug in Terraform and should be reported") + } + + log.Printf("[TRACE] readResourceInstanceStateDeposed: reading state for %s deposed object %s", addr, key) + + src := ctx.State().ResourceInstanceObject(addr, key) + if src == nil { + // Presumably we only have deposed objects, then. + log.Printf("[TRACE] readResourceInstanceStateDeposed: no state present for %s deposed object %s", addr, key) + return nil, nil + } + + schema, currentVersion := (providerSchema).SchemaForResourceAddr(addr.Resource.ContainingResource()) + if schema == nil { + // Shouldn't happen since we should've failed long ago if no schema is present + return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", addr) + + } + + src, diags := upgradeResourceState(addr, provider, src, schema, currentVersion) if diags.HasErrors() { // Note that we don't have any channel to return warnings here. 
We'll // accept that for now since warnings during a schema upgrade would diff --git a/terraform/node_resource_abstract_instance.go b/terraform/node_resource_abstract_instance.go index 4739679ce..c73f60050 100644 --- a/terraform/node_resource_abstract_instance.go +++ b/terraform/node_resource_abstract_instance.go @@ -3,12 +3,19 @@ package terraform import ( "fmt" "log" + "strings" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/objchange" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" ) // NodeAbstractResourceInstance represents a resource instance with no @@ -162,7 +169,7 @@ func (n *NodeAbstractResourceInstance) readDiff(ctx EvalContext, providerSchema gen := states.CurrentGen csrc := changes.GetResourceInstanceChange(addr, gen) if csrc == nil { - log.Printf("[TRACE] EvalReadDiff: No planned change recorded for %s", n.Addr) + log.Printf("[TRACE] readDiff: No planned change recorded for %s", n.Addr) return nil, nil } @@ -171,7 +178,7 @@ func (n *NodeAbstractResourceInstance) readDiff(ctx EvalContext, providerSchema return nil, fmt.Errorf("failed to decode planned changes for %s: %s", n.Addr, err) } - log.Printf("[TRACE] EvalReadDiff: Read %s change from plan for %s", change.Action, n.Addr) + log.Printf("[TRACE] readDiff: Read %s change from plan for %s", change.Action, n.Addr) return change, nil } @@ -199,3 +206,1908 @@ func (n *NodeAbstractResourceInstance) checkPreventDestroy(change *plans.Resourc return nil } + +// preApplyHook calls the pre-Apply hook +func (n *NodeAbstractResourceInstance) preApplyHook(ctx EvalContext, change *plans.ResourceInstanceChange) tfdiags.Diagnostics { + var diags 
tfdiags.Diagnostics + + if change == nil { + panic(fmt.Sprintf("preApplyHook for %s called with nil Change", n.Addr)) + } + + // Only managed resources have user-visible apply actions. + if n.Addr.Resource.Resource.Mode == addrs.ManagedResourceMode { + priorState := change.Before + plannedNewState := change.After + + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreApply(n.Addr, nil, change.Action, priorState, plannedNewState) + })) + if diags.HasErrors() { + return diags + } + } + + return nil +} + +// postApplyHook calls the post-Apply hook +func (n *NodeAbstractResourceInstance) postApplyHook(ctx EvalContext, state *states.ResourceInstanceObject, err error) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // Only managed resources have user-visible apply actions. + if n.Addr.Resource.Resource.Mode == addrs.ManagedResourceMode { + var newState cty.Value + if state != nil { + newState = state.Value + } else { + newState = cty.NullVal(cty.DynamicPseudoType) + } + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostApply(n.Addr, nil, newState, err) + })) + } + + return diags +} + +type phaseState int + +const ( + workingState phaseState = iota + refreshState +) + +// writeResourceInstanceState saves the given object as the current object for +// the selected resource instance. +// +// dependencies is a parameter, instead of those directly attached to the +// NodeAbstractResourceInstance, because we don't write dependencies for +// datasources. +// +// targetState determines which context state we're writing to during plan. The +// default is the global working state. 
+func (n *NodeAbstractResourceInstance) writeResourceInstanceState(ctx EvalContext, obj *states.ResourceInstanceObject, dependencies []addrs.ConfigResource, targetState phaseState) error { + absAddr := n.Addr + _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + if err != nil { + return err + } + + var state *states.SyncState + switch targetState { + case refreshState: + log.Printf("[TRACE] writeResourceInstanceState: using RefreshState for %s", absAddr) + state = ctx.RefreshState() + default: + state = ctx.State() + } + + if obj == nil || obj.Value.IsNull() { + // No need to encode anything: we'll just write it directly. + state.SetResourceInstanceCurrent(absAddr, nil, n.ResolvedProvider) + log.Printf("[TRACE] writeResourceInstanceState: removing state object for %s", absAddr) + return nil + } + + // store the new deps in the state. + // We check for nil here because we don't want to override existing dependencies on orphaned nodes. + if dependencies != nil { + obj.Dependencies = dependencies + } + + if providerSchema == nil { + // Should never happen, unless our state object is nil + panic("writeResourceInstanceState used with nil ProviderSchema") + } + + if obj != nil { + log.Printf("[TRACE] writeResourceInstanceState: writing current state object for %s", absAddr) + } else { + log.Printf("[TRACE] writeResourceInstanceState: removing current state object for %s", absAddr) + } + + schema, currentVersion := (*providerSchema).SchemaForResourceAddr(absAddr.ContainingResource().Resource) + if schema == nil { + // It shouldn't be possible to get this far in any real scenario + // without a schema, but we might end up here in contrived tests that + // fail to set up their world properly. 
+ return fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) + } + + src, err := obj.Encode(schema.ImpliedType(), currentVersion) + if err != nil { + return fmt.Errorf("failed to encode %s in state: %s", absAddr, err) + } + + state.SetResourceInstanceCurrent(absAddr, src, n.ResolvedProvider) + return nil +} + +// planDestroy returns a plain destroy diff. +func (n *NodeAbstractResourceInstance) planDestroy(ctx EvalContext, currentState *states.ResourceInstanceObject, deposedKey states.DeposedKey) (*plans.ResourceInstanceChange, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + absAddr := n.Addr + + if n.ResolvedProvider.Provider.Type == "" { + if deposedKey == "" { + panic(fmt.Sprintf("planDestroy for %s does not have ProviderAddr set", absAddr)) + } else { + panic(fmt.Sprintf("planDestroy for %s (deposed %s) does not have ProviderAddr set", absAddr, deposedKey)) + } + } + + // If there is no state or our attributes object is null then we're already + // destroyed. + if currentState == nil || currentState.Value.IsNull() { + return nil, nil + } + + // Call pre-diff hook + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff( + absAddr, deposedKey.Generation(), + currentState.Value, + cty.NullVal(cty.DynamicPseudoType), + ) + })) + if diags.HasErrors() { + return nil, diags + } + + // Plan is always the same for a destroy. We don't need the provider's + // help for this one. 
+ plan := &plans.ResourceInstanceChange{ + Addr: absAddr, + DeposedKey: deposedKey, + Change: plans.Change{ + Action: plans.Delete, + Before: currentState.Value, + After: cty.NullVal(cty.DynamicPseudoType), + }, + Private: currentState.Private, + ProviderAddr: n.ResolvedProvider, + } + + // Call post-diff hook + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff( + absAddr, + deposedKey.Generation(), + plan.Action, + plan.Before, + plan.After, + ) + })) + + return plan, diags +} + +// writeChange saves a planned change for an instance object into the set of +// global planned changes. +func (n *NodeAbstractResourceInstance) writeChange(ctx EvalContext, change *plans.ResourceInstanceChange, deposedKey states.DeposedKey) error { + changes := ctx.Changes() + + if change == nil { + // Caller sets nil to indicate that we need to remove a change from + // the set of changes. + gen := states.CurrentGen + if deposedKey != states.NotDeposed { + gen = deposedKey + } + changes.RemoveResourceInstanceChange(n.Addr, gen) + return nil + } + + _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + if err != nil { + return err + } + + if change.Addr.String() != n.Addr.String() || change.DeposedKey != deposedKey { + // Should never happen, and indicates a bug in the caller. 
+ panic("inconsistent address and/or deposed key in writeChange") + } + + ri := n.Addr.Resource + schema, _ := providerSchema.SchemaForResourceAddr(ri.Resource) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return fmt.Errorf("provider does not support resource type %q", ri.Resource.Type) + } + + csrc, err := change.Encode(schema.ImpliedType()) + if err != nil { + return fmt.Errorf("failed to encode planned changes for %s: %s", n.Addr, err) + } + + changes.AppendResourceInstanceChange(csrc) + if deposedKey == states.NotDeposed { + log.Printf("[TRACE] writeChange: recorded %s change for %s", change.Action, n.Addr) + } else { + log.Printf("[TRACE] writeChange: recorded %s change for %s deposed object %s", change.Action, n.Addr, deposedKey) + } + + return nil +} + +// refresh does a refresh for a resource +func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, state *states.ResourceInstanceObject) (*states.ResourceInstanceObject, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + absAddr := n.Addr + provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + if err != nil { + return state, diags.Append(err) + } + // If we have no state, we don't do any refreshing + if state == nil { + log.Printf("[DEBUG] refresh: %s: no state, so not refreshing", absAddr) + return state, diags + } + + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.Resource.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + diags = diags.Append(fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Resource.Type)) + return state, diags + } + + metaConfigVal, metaDiags := n.providerMetas(ctx) + diags = diags.Append(metaDiags) + if diags.HasErrors() { + return state, diags + } + + // Call pre-refresh hook + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreRefresh(absAddr, 
states.CurrentGen, state.Value) + })) + if diags.HasErrors() { + return state, diags + } + + // Refresh! + priorVal := state.Value + + // Unmarked before sending to provider + var priorPaths []cty.PathValueMarks + if priorVal.ContainsMarked() { + priorVal, priorPaths = priorVal.UnmarkDeepWithPaths() + } + + providerReq := providers.ReadResourceRequest{ + TypeName: n.Addr.Resource.Resource.Type, + PriorState: priorVal, + Private: state.Private, + ProviderMeta: metaConfigVal, + } + + resp := provider.ReadResource(providerReq) + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return state, diags + } + + if resp.NewState == cty.NilVal { + // This ought not to happen in real cases since it's not possible to + // send NilVal over the plugin RPC channel, but it can come up in + // tests due to sloppy mocking. + panic("new state is cty.NilVal") + } + + for _, err := range resp.NewState.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q planned an invalid value for %s during refresh: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ResolvedProvider.Provider.String(), absAddr, tfdiags.FormatError(err), + ), + )) + } + if diags.HasErrors() { + return state, diags + } + + // We have no way to exempt provider using the legacy SDK from this check, + // so we can only log inconsistencies with the updated state values. + // In most cases these are not errors anyway, and represent "drift" from + // external changes which will be handled by the subsequent plan. 
+ if errs := objchange.AssertObjectCompatible(schema, priorVal, resp.NewState); len(errs) > 0 { + var buf strings.Builder + fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s during refresh.", n.ResolvedProvider.Provider.String(), absAddr) + for _, err := range errs { + fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) + } + log.Print(buf.String()) + } + + ret := state.DeepCopy() + ret.Value = resp.NewState + ret.Private = resp.Private + ret.Dependencies = state.Dependencies + ret.CreateBeforeDestroy = state.CreateBeforeDestroy + + // Call post-refresh hook + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostRefresh(absAddr, states.CurrentGen, priorVal, ret.Value) + })) + if diags.HasErrors() { + return ret, diags + } + + // Mark the value if necessary + if len(priorPaths) > 0 { + ret.Value = ret.Value.MarkWithPaths(priorPaths) + } + + return ret, diags +} + +func (n *NodeAbstractResourceInstance) plan( + ctx EvalContext, + plannedChange *plans.ResourceInstanceChange, + currentState *states.ResourceInstanceObject, + createBeforeDestroy bool) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var state *states.ResourceInstanceObject + var plan *plans.ResourceInstanceChange + + config := *n.Config + resource := n.Addr.Resource.Resource + provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + if err != nil { + return plan, state, diags.Append(err) + } + + if plannedChange != nil { + // If we already planned the action, we stick to that plan + createBeforeDestroy = plannedChange.Action == plans.CreateThenDelete + } + + if providerSchema == nil { + diags = diags.Append(fmt.Errorf("provider schema is unavailable for %s", n.Addr)) + return plan, state, diags + } + + // Evaluate the configuration + schema, _ := providerSchema.SchemaForResourceAddr(resource) + if schema == nil { + // Should be caught during validation, so we 
don't bother with a pretty error here + diags = diags.Append(fmt.Errorf("provider does not support resource type %q", resource.Type)) + return plan, state, diags + } + + forEach, _ := evaluateForEachExpression(n.Config.ForEach, ctx) + + keyData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) + origConfigVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return plan, state, diags + } + + metaConfigVal, metaDiags := n.providerMetas(ctx) + diags = diags.Append(metaDiags) + if diags.HasErrors() { + return plan, state, diags + } + + var priorVal cty.Value + var priorValTainted cty.Value + var priorPrivate []byte + if currentState != nil { + if currentState.Status != states.ObjectTainted { + priorVal = currentState.Value + priorPrivate = currentState.Private + } else { + // If the prior state is tainted then we'll proceed below like + // we're creating an entirely new object, but then turn it into + // a synthetic "Replace" change at the end, creating the same + // result as if the provider had marked at least one argument + // change as "requires replacement". + priorValTainted = currentState.Value + priorVal = cty.NullVal(schema.ImpliedType()) + } + } else { + priorVal = cty.NullVal(schema.ImpliedType()) + } + + // Create an unmarked version of our config val and our prior val. + // Store the paths for the config val to re-mark after + // we've sent things over the wire. + unmarkedConfigVal, unmarkedPaths := origConfigVal.UnmarkDeepWithPaths() + unmarkedPriorVal, priorPaths := priorVal.UnmarkDeepWithPaths() + + log.Printf("[TRACE] Re-validating config for %q", n.Addr) + // Allow the provider to validate the final set of values. + // The config was statically validated early on, but there may have been + // unknown values which the provider could not validate at the time. 
+ // TODO: It would be more correct to validate the config after + // ignore_changes has been applied, but the current implementation cannot + // exclude computed-only attributes when given the `all` option. + validateResp := provider.ValidateResourceTypeConfig( + providers.ValidateResourceTypeConfigRequest{ + TypeName: n.Addr.Resource.Resource.Type, + Config: unmarkedConfigVal, + }, + ) + if validateResp.Diagnostics.HasErrors() { + diags = diags.Append(validateResp.Diagnostics.InConfigBody(config.Config)) + return plan, state, diags + } + + // ignore_changes is meant to only apply to the configuration, so it must + // be applied before we generate a plan. This ensures the config used for + // the proposed value, the proposed value itself, and the config presented + // to the provider in the PlanResourceChange request all agree on the + // starting values. + configValIgnored, ignoreChangeDiags := n.processIgnoreChanges(unmarkedPriorVal, unmarkedConfigVal) + diags = diags.Append(ignoreChangeDiags) + if ignoreChangeDiags.HasErrors() { + return plan, state, diags + } + + proposedNewVal := objchange.ProposedNewObject(schema, unmarkedPriorVal, configValIgnored) + + // Call pre-diff hook + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(n.Addr, states.CurrentGen, priorVal, proposedNewVal) + })) + if diags.HasErrors() { + return plan, state, diags + } + + resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: n.Addr.Resource.Resource.Type, + Config: configValIgnored, + PriorState: unmarkedPriorVal, + ProposedNewState: proposedNewVal, + PriorPrivate: priorPrivate, + ProviderMeta: metaConfigVal, + }) + diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) + if diags.HasErrors() { + return plan, state, diags + } + + plannedNewVal := resp.PlannedState + plannedPrivate := resp.PlannedPrivate + + if plannedNewVal == cty.NilVal { + // Should never happen. 
Since real-world providers return via RPC a nil + // is always a bug in the client-side stub. This is more likely caused + // by an incompletely-configured mock provider in tests, though. + panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", n.Addr)) + } + + // We allow the planned new value to disagree with configuration _values_ + // here, since that allows the provider to do special logic like a + // DiffSuppressFunc, but we still require that the provider produces + // a value whose type conforms to the schema. + for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ResolvedProvider.Provider, tfdiags.FormatErrorPrefixed(err, n.Addr.String()), + ), + )) + } + if diags.HasErrors() { + return plan, state, diags + } + + if errs := objchange.AssertPlanValid(schema, unmarkedPriorVal, configValIgnored, plannedNewVal); len(errs) > 0 { + if resp.LegacyTypeSystem { + // The shimming of the old type system in the legacy SDK is not precise + // enough to pass this consistency check, so we'll give it a pass here, + // but we will generate a warning about it so that we are more likely + // to notice in the logs if an inconsistency beyond the type system + // leads to a downstream provider failure. 
+ var buf strings.Builder + fmt.Fprintf(&buf, + "[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", + n.ResolvedProvider.Provider, n.Addr, + ) + for _, err := range errs { + fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) + } + log.Print(buf.String()) + } else { + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ResolvedProvider.Provider, tfdiags.FormatErrorPrefixed(err, n.Addr.String()), + ), + )) + } + return plan, state, diags + } + } + + if resp.LegacyTypeSystem { + // Because we allow legacy providers to depart from the contract and + // return changes to non-computed values, the plan response may have + // altered values that were already suppressed with ignore_changes. + // A prime example of this is where providers attempt to obfuscate + // config data by turning the config value into a hash and storing the + // hash value in the state. There are enough cases of this in existing + // providers that we must accommodate the behavior for now, so for + // ignore_changes to work at all on these values, we will revert the + // ignored values once more. 
+ plannedNewVal, ignoreChangeDiags = n.processIgnoreChanges(unmarkedPriorVal, plannedNewVal) + diags = diags.Append(ignoreChangeDiags) + if ignoreChangeDiags.HasErrors() { + return plan, state, diags + } + } + + // Add the marks back to the planned new value -- this must happen after ignore changes + // have been processed + unmarkedPlannedNewVal := plannedNewVal + if len(unmarkedPaths) > 0 { + plannedNewVal = plannedNewVal.MarkWithPaths(unmarkedPaths) + } + + // The provider produces a list of paths to attributes whose changes mean + // that we must replace rather than update an existing remote object. + // However, we only need to do that if the identified attributes _have_ + // actually changed -- particularly after we may have undone some of the + // changes in processIgnoreChanges -- so now we'll filter that list to + // include only where changes are detected. + reqRep := cty.NewPathSet() + if len(resp.RequiresReplace) > 0 { + for _, path := range resp.RequiresReplace { + if priorVal.IsNull() { + // If prior is null then we don't expect any RequiresReplace at all, + // because this is a Create action. + continue + } + + priorChangedVal, priorPathDiags := hcl.ApplyPath(unmarkedPriorVal, path, nil) + plannedChangedVal, plannedPathDiags := hcl.ApplyPath(plannedNewVal, path, nil) + if plannedPathDiags.HasErrors() && priorPathDiags.HasErrors() { + // This means the path was invalid in both the prior and new + // values, which is an error with the provider itself. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q has indicated \"requires replacement\" on %s for a non-existent attribute path %#v.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ResolvedProvider.Provider, n.Addr, path, + ), + )) + continue + } + + // Make sure we have valid Values for both values. 
+ // Note: if the opposing value was of the type + // cty.DynamicPseudoType, the type assigned here may not exactly + // match the schema. This is fine here, since we're only going to + // check for equality, but if the NullVal is to be used, we need to + // check the schema for th true type. + switch { + case priorChangedVal == cty.NilVal && plannedChangedVal == cty.NilVal: + // this should never happen without ApplyPath errors above + panic("requires replace path returned 2 nil values") + case priorChangedVal == cty.NilVal: + priorChangedVal = cty.NullVal(plannedChangedVal.Type()) + case plannedChangedVal == cty.NilVal: + plannedChangedVal = cty.NullVal(priorChangedVal.Type()) + } + + // Unmark for this value for the equality test. If only sensitivity has changed, + // this does not require an Update or Replace + unmarkedPlannedChangedVal, _ := plannedChangedVal.UnmarkDeep() + eqV := unmarkedPlannedChangedVal.Equals(priorChangedVal) + if !eqV.IsKnown() || eqV.False() { + reqRep.Add(path) + } + } + if diags.HasErrors() { + return plan, state, diags + } + } + + // Unmark for this test for value equality. + eqV := unmarkedPlannedNewVal.Equals(unmarkedPriorVal) + eq := eqV.IsKnown() && eqV.True() + + var action plans.Action + switch { + case priorVal.IsNull(): + action = plans.Create + case eq: + action = plans.NoOp + case !reqRep.Empty(): + // If there are any "requires replace" paths left _after our filtering + // above_ then this is a replace action. + if createBeforeDestroy { + action = plans.CreateThenDelete + } else { + action = plans.DeleteThenCreate + } + default: + action = plans.Update + // "Delete" is never chosen here, because deletion plans are always + // created more directly elsewhere, such as in "orphan" handling. 
+ } + + if action.IsReplace() { + // In this strange situation we want to produce a change object that + // shows our real prior object but has a _new_ object that is built + // from a null prior object, since we're going to delete the one + // that has all the computed values on it. + // + // Therefore we'll ask the provider to plan again here, giving it + // a null object for the prior, and then we'll meld that with the + // _actual_ prior state to produce a correctly-shaped replace change. + // The resulting change should show any computed attributes changing + // from known prior values to unknown values, unless the provider is + // able to predict new values for any of these computed attributes. + nullPriorVal := cty.NullVal(schema.ImpliedType()) + + // Since there is no prior state to compare after replacement, we need + // a new unmarked config from our original with no ignored values. + unmarkedConfigVal := origConfigVal + if origConfigVal.ContainsMarked() { + unmarkedConfigVal, _ = origConfigVal.UnmarkDeep() + } + + // create a new proposed value from the null state and the config + proposedNewVal = objchange.ProposedNewObject(schema, nullPriorVal, unmarkedConfigVal) + + resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: n.Addr.Resource.Resource.Type, + Config: unmarkedConfigVal, + PriorState: nullPriorVal, + ProposedNewState: proposedNewVal, + PriorPrivate: plannedPrivate, + ProviderMeta: metaConfigVal, + }) + // We need to tread carefully here, since if there are any warnings + // in here they probably also came out of our previous call to + // PlanResourceChange above, and so we don't want to repeat them. + // Consequently, we break from the usual pattern here and only + // append these new diagnostics if there's at least one error inside. 
+ if resp.Diagnostics.HasErrors() { + diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) + return plan, state, diags + } + plannedNewVal = resp.PlannedState + plannedPrivate = resp.PlannedPrivate + + if len(unmarkedPaths) > 0 { + plannedNewVal = plannedNewVal.MarkWithPaths(unmarkedPaths) + } + + for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q planned an invalid value for %s%s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ResolvedProvider.Provider, n.Addr, tfdiags.FormatError(err), + ), + )) + } + if diags.HasErrors() { + return plan, state, diags + } + } + + // If our prior value was tainted then we actually want this to appear + // as a replace change, even though so far we've been treating it as a + // create. + if action == plans.Create && !priorValTainted.IsNull() { + if createBeforeDestroy { + action = plans.CreateThenDelete + } else { + action = plans.DeleteThenCreate + } + priorVal = priorValTainted + } + + // If we plan to write or delete sensitive paths from state, + // this is an Update action + if action == plans.NoOp && !marksEqual(unmarkedPaths, priorPaths) { + action = plans.Update + } + + // As a special case, if we have a previous diff (presumably from the plan + // phases, whereas we're now in the apply phase) and it was for a replace, + // we've already deleted the original object from state by the time we + // get here and so we would've ended up with a _create_ action this time, + // which we now need to paper over to get a result consistent with what + // we originally intended. 
+ if plannedChange != nil { + prevChange := *plannedChange + if prevChange.Action.IsReplace() && action == plans.Create { + log.Printf("[TRACE] plan: %s treating Create change as %s change to match with earlier plan", n.Addr, prevChange.Action) + action = prevChange.Action + priorVal = prevChange.Before + } + } + + // Call post-refresh hook + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff(n.Addr, states.CurrentGen, action, priorVal, plannedNewVal) + })) + if diags.HasErrors() { + return plan, state, diags + } + + // Update our return plan + plan = &plans.ResourceInstanceChange{ + Addr: n.Addr, + Private: plannedPrivate, + ProviderAddr: n.ResolvedProvider, + Change: plans.Change{ + Action: action, + Before: priorVal, + // Pass the marked planned value through in our change + // to propogate through evaluation. + // Marks will be removed when encoding. + After: plannedNewVal, + }, + RequiredReplace: reqRep, + } + + // Update our return state + state = &states.ResourceInstanceObject{ + // We use the special "planned" status here to note that this + // object's value is not yet complete. Objects with this status + // cannot be used during expression evaluation, so the caller + // must _also_ record the returned change in the active plan, + // which the expression evaluator will use in preference to this + // incomplete value recorded in the state. + Status: states.ObjectPlanned, + Value: plannedNewVal, + Private: plannedPrivate, + } + + return plan, state, diags +} + +func (n *NodeAbstractResource) processIgnoreChanges(prior, config cty.Value) (cty.Value, tfdiags.Diagnostics) { + // ignore_changes only applies when an object already exists, since we + // can't ignore changes to a thing we've not created yet. 
+ if prior.IsNull() { + return config, nil + } + + ignoreChanges := n.Config.Managed.IgnoreChanges + ignoreAll := n.Config.Managed.IgnoreAllChanges + + if len(ignoreChanges) == 0 && !ignoreAll { + return config, nil + } + if ignoreAll { + return prior, nil + } + if prior.IsNull() || config.IsNull() { + // Ignore changes doesn't apply when we're creating for the first time. + // Proposed should never be null here, but if it is then we'll just let it be. + return config, nil + } + + return processIgnoreChangesIndividual(prior, config, ignoreChanges) +} + +func processIgnoreChangesIndividual(prior, config cty.Value, ignoreChanges []hcl.Traversal) (cty.Value, tfdiags.Diagnostics) { + // When we walk below we will be using cty.Path values for comparison, so + // we'll convert our traversals here so we can compare more easily. + ignoreChangesPath := make([]cty.Path, len(ignoreChanges)) + for i, traversal := range ignoreChanges { + path := make(cty.Path, len(traversal)) + for si, step := range traversal { + switch ts := step.(type) { + case hcl.TraverseRoot: + path[si] = cty.GetAttrStep{ + Name: ts.Name, + } + case hcl.TraverseAttr: + path[si] = cty.GetAttrStep{ + Name: ts.Name, + } + case hcl.TraverseIndex: + path[si] = cty.IndexStep{ + Key: ts.Key, + } + default: + panic(fmt.Sprintf("unsupported traversal step %#v", step)) + } + } + ignoreChangesPath[i] = path + } + + type ignoreChange struct { + // Path is the full path, minus any trailing map index + path cty.Path + // Value is the value we are to retain at the above path. If there is a + // key value, this must be a map and the desired value will be at the + // key index. + value cty.Value + // Key is the index key if the ignored path ends in a map index. + key cty.Value + } + var ignoredValues []ignoreChange + + // Find the actual changes first and store them in the ignoreChange struct. 
+ // If the change was to a map value, and the key doesn't exist in the + // config, it would never be visited in the transform walk. + for _, icPath := range ignoreChangesPath { + key := cty.NullVal(cty.String) + // check for a map index, since maps are the only structure where we + // could have invalid path steps. + last, ok := icPath[len(icPath)-1].(cty.IndexStep) + if ok { + if last.Key.Type() == cty.String { + icPath = icPath[:len(icPath)-1] + key = last.Key + } + } + + // The structure should have been validated already, and we already + // trimmed the trailing map index. Any other intermediate index error + // means we wouldn't be able to apply the value below, so no need to + // record this. + p, err := icPath.Apply(prior) + if err != nil { + continue + } + c, err := icPath.Apply(config) + if err != nil { + continue + } + + // If this is a map, it is checking the entire map value for equality + // rather than the individual key. This means that the change is stored + // here even if our ignored key doesn't change. That is OK since it + // won't cause any changes in the transformation, but allows us to skip + // breaking up the maps and checking for key existence here too. + eq := p.Equals(c) + if !eq.IsKnown() || eq.False() { + // there a change to ignore at this path, store the prior value + ignoredValues = append(ignoredValues, ignoreChange{icPath, p, key}) + } + } + + if len(ignoredValues) == 0 { + return config, nil + } + + ret, _ := cty.Transform(config, func(path cty.Path, v cty.Value) (cty.Value, error) { + // Easy path for when we are only matching the entire value. The only + // values we break up for inspection are maps. + if !v.Type().IsMapType() { + for _, ignored := range ignoredValues { + if path.Equals(ignored.path) { + return ignored.value, nil + } + } + return v, nil + } + // We now know this must be a map, so we need to accumulate the values + // key-by-key. 
+ + if !v.IsNull() && !v.IsKnown() { + // since v is not known, we cannot ignore individual keys + return v, nil + } + + // The configMap is the current configuration value, which we will + // mutate based on the ignored paths and the prior map value. + var configMap map[string]cty.Value + switch { + case v.IsNull() || v.LengthInt() == 0: + configMap = map[string]cty.Value{} + default: + configMap = v.AsValueMap() + } + + for _, ignored := range ignoredValues { + if !path.Equals(ignored.path) { + continue + } + + if ignored.key.IsNull() { + // The map address is confirmed to match at this point, + // so if there is no key, we want the entire map and can + // stop accumulating values. + return ignored.value, nil + } + // Now we know we are ignoring a specific index of this map, so get + // the config map and modify, add, or remove the desired key. + + // We also need to create a prior map, so we can check for + // existence while getting the value, because Value.Index will + // return null for a key with a null value and for a non-existent + // key. + var priorMap map[string]cty.Value + switch { + case ignored.value.IsNull() || ignored.value.LengthInt() == 0: + priorMap = map[string]cty.Value{} + default: + priorMap = ignored.value.AsValueMap() + } + + key := ignored.key.AsString() + priorElem, keep := priorMap[key] + + switch { + case !keep: + // this didn't exist in the old map value, so we're keeping the + // "absence" of the key by removing it from the config + delete(configMap, key) + default: + configMap[key] = priorElem + } + } + + if len(configMap) == 0 { + return cty.MapValEmpty(v.Type().ElementType()), nil + } + + return cty.MapVal(configMap), nil + }) + return ret, nil +} + +// readDataSource handles everything needed to call ReadDataSource on the provider. +// A previously evaluated configVal can be passed in, or a new one is generated +// from the resource configuration. 
+func (n *NodeAbstractResourceInstance) readDataSource(ctx EvalContext, configVal cty.Value) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var newVal cty.Value + + config := *n.Config + + provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + diags = diags.Append(err) + if diags.HasErrors() { + return newVal, diags + } + if providerSchema == nil { + diags = diags.Append(fmt.Errorf("provider schema not available for %s", n.Addr)) + return newVal, diags + } + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource().Resource) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + diags = diags.Append(fmt.Errorf("provider %q does not support data source %q", n.ResolvedProvider, n.Addr.ContainingResource().Resource.Type)) + return newVal, diags + } + + metaConfigVal, metaDiags := n.providerMetas(ctx) + diags = diags.Append(metaDiags) + if diags.HasErrors() { + return newVal, diags + } + + // Unmark before sending to provider, will re-mark before returning + var pvm []cty.PathValueMarks + configVal, pvm = configVal.UnmarkDeepWithPaths() + + log.Printf("[TRACE] readDataSource: Re-validating config for %s", n.Addr) + validateResp := provider.ValidateDataSourceConfig( + providers.ValidateDataSourceConfigRequest{ + TypeName: n.Addr.ContainingResource().Resource.Type, + Config: configVal, + }, + ) + if validateResp.Diagnostics.HasErrors() { + return newVal, validateResp.Diagnostics.InConfigBody(config.Config) + } + + // If we get down here then our configuration is complete and we're read + // to actually call the provider to read the data. 
+ log.Printf("[TRACE] readDataSource: %s configuration is complete, so reading from provider", n.Addr) + + resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: n.Addr.ContainingResource().Resource.Type, + Config: configVal, + ProviderMeta: metaConfigVal, + }) + diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) + if diags.HasErrors() { + return newVal, diags + } + newVal = resp.State + if newVal == cty.NilVal { + // This can happen with incompletely-configured mocks. We'll allow it + // and treat it as an alias for a properly-typed null value. + newVal = cty.NullVal(schema.ImpliedType()) + } + + for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ResolvedProvider, tfdiags.FormatErrorPrefixed(err, n.Addr.String()), + ), + )) + } + if diags.HasErrors() { + return newVal, diags + } + + if newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced null object", + fmt.Sprintf( + "Provider %q produced a null value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ResolvedProvider, n.Addr, + ), + )) + } + + if !newVal.IsNull() && !newVal.IsWhollyKnown() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced a value for %s that is not wholly known.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ResolvedProvider, n.Addr, + ), + )) + + // We'll still save the object, but we need to eliminate any unknown + // values first because we can't serialize them in the state file. 
+ // Note that this may cause set elements to be coalesced if they + // differed only by having unknown values, but we don't worry about + // that here because we're saving the value only for inspection + // purposes; the error we added above will halt the graph walk. + newVal = cty.UnknownAsNull(newVal) + } + + if len(pvm) > 0 { + newVal = newVal.MarkWithPaths(pvm) + } + + return newVal, diags +} + +func (n *NodeAbstractResourceInstance) providerMetas(ctx EvalContext) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + metaConfigVal := cty.NullVal(cty.DynamicPseudoType) + + _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + if err != nil { + return metaConfigVal, diags.Append(err) + } + if providerSchema == nil { + return metaConfigVal, diags.Append(fmt.Errorf("provider schema not available for %s", n.Addr)) + } + if n.ProviderMetas != nil { + if m, ok := n.ProviderMetas[n.ResolvedProvider.Provider]; ok && m != nil { + // if the provider doesn't support this feature, throw an error + if providerSchema.ProviderMeta == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Provider %s doesn't support provider_meta", n.ResolvedProvider.Provider.String()), + Detail: fmt.Sprintf("The resource %s belongs to a provider that doesn't support provider_meta blocks", n.Addr.Resource), + Subject: &m.ProviderRange, + }) + } else { + var configDiags tfdiags.Diagnostics + metaConfigVal, _, configDiags = ctx.EvaluateBlock(m.Config, providerSchema.ProviderMeta, nil, EvalDataForNoInstanceKey) + diags = diags.Append(configDiags) + } + } + } + return metaConfigVal, diags +} + +// planDataSource deals with the main part of the data resource lifecycle: +// either actually reading from the data source or generating a plan to do so. +// +// currentState is the current state for the data source, and the new state is +// returned. 
While data sources are read-only, we need to start with the prior +// state to determine if we have a change or not. If we needed to read a new +// value, but it still matches the previous state, then we can record a NoNop +// change. If the states don't match then we record a Read change so that the +// new value is applied to the state. +func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentState *states.ResourceInstanceObject) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var configVal cty.Value + + _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + if err != nil { + return nil, nil, diags.Append(err) + } + if providerSchema == nil { + return nil, nil, diags.Append(fmt.Errorf("provider schema not available for %s", n.Addr)) + } + + config := *n.Config + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource().Resource) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + diags = diags.Append(fmt.Errorf("provider %q does not support data source %q", n.ResolvedProvider, n.Addr.ContainingResource().Resource.Type)) + return nil, nil, diags + } + + objTy := schema.ImpliedType() + priorVal := cty.NullVal(objTy) + if currentState != nil { + priorVal = currentState.Value + } + + forEach, _ := evaluateForEachExpression(config.ForEach, ctx) + keyData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) + + var configDiags tfdiags.Diagnostics + configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, nil, diags + } + + configKnown := configVal.IsWhollyKnown() + // If our configuration contains any unknown values, or we depend on any + // unknown values then we must defer the read to the apply phase by + // producing a "Read" change for this resource, and a 
placeholder value for + // it in the state. + if n.forcePlanReadData(ctx) || !configKnown { + if configKnown { + log.Printf("[TRACE] planDataSource: %s configuration is fully known, but we're forcing a read plan to be created", n.Addr) + } else { + log.Printf("[TRACE] planDataSource: %s configuration not fully known yet, so deferring to apply phase", n.Addr) + } + + proposedNewVal := objchange.PlannedDataResourceObject(schema, configVal) + + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(n.Addr, states.CurrentGen, priorVal, proposedNewVal) + })) + if diags.HasErrors() { + return nil, nil, diags + } + + // Apply detects that the data source will need to be read by the After + // value containing unknowns from PlanDataResourceObject. + plannedChange := &plans.ResourceInstanceChange{ + Addr: n.Addr, + ProviderAddr: n.ResolvedProvider, + Change: plans.Change{ + Action: plans.Read, + Before: priorVal, + After: proposedNewVal, + }, + } + + plannedNewState := &states.ResourceInstanceObject{ + Value: proposedNewVal, + Status: states.ObjectPlanned, + } + + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff(n.Addr, states.CurrentGen, plans.Read, priorVal, proposedNewVal) + })) + + return plannedChange, plannedNewState, diags + } + + // While this isn't a "diff", continue to call this for data sources. + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(n.Addr, states.CurrentGen, priorVal, configVal) + })) + if diags.HasErrors() { + return nil, nil, diags + } + // We have a complete configuration with no dependencies to wait on, so we + // can read the data source into the state. 
+ newVal, readDiags := n.readDataSource(ctx, configVal) + diags = diags.Append(readDiags) + if diags.HasErrors() { + return nil, nil, diags + } + + // if we have a prior value, we can check for any irregularities in the response + if !priorVal.IsNull() { + // We drop marks on the values used here as the result is only + // temporarily used for validation. + unmarkedConfigVal, _ := configVal.UnmarkDeep() + unmarkedPriorVal, _ := priorVal.UnmarkDeep() + + // While we don't propose planned changes for data sources, we can + // generate a proposed value for comparison to ensure the data source + // is returning a result following the rules of the provider contract. + proposedVal := objchange.ProposedNewObject(schema, unmarkedPriorVal, unmarkedConfigVal) + if errs := objchange.AssertObjectCompatible(schema, proposedVal, newVal); len(errs) > 0 { + // Resources have the LegacyTypeSystem field to signal when they are + // using an SDK which may not produce precise values. While data + // sources are read-only, they can still return a value which is not + // compatible with the config+schema. Since we can't detect the legacy + // type system, we can only warn about this for now. + var buf strings.Builder + fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s.", + n.ResolvedProvider, n.Addr) + for _, err := range errs { + fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) + } + log.Print(buf.String()) + } + } + + plannedNewState := &states.ResourceInstanceObject{ + Value: newVal, + Status: states.ObjectReady, + } + + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff(n.Addr, states.CurrentGen, plans.Update, priorVal, newVal) + })) + return nil, plannedNewState, diags +} + +// forcePlanReadData determines if we need to override the usual behavior of +// immediately reading from the data source where possible, instead forcing us +// to generate a plan. 
+func (n *NodeAbstractResourceInstance) forcePlanReadData(ctx EvalContext) bool { + // Check and see if any depends_on dependencies have + // changes, since they won't show up as changes in the + // configuration. + changes := ctx.Changes() + for _, d := range n.dependsOn { + if d.Resource.Mode == addrs.DataResourceMode { + // Data sources have no external side effects, so they pose a need + // to delay this read. If they do have a change planned, it must be + // because of a dependency on a managed resource, in which case + // we'll also encounter it in this list of dependencies. + continue + } + + for _, change := range changes.GetChangesForConfigResource(d) { + if change != nil && change.Action != plans.NoOp { + return true + } + } + } + return false +} + +// apply deals with the main part of the data resource lifecycle: either +// actually reading from the data source or generating a plan to do so. +func (n *NodeAbstractResourceInstance) applyDataSource(ctx EvalContext, planned *plans.ResourceInstanceChange) (*states.ResourceInstanceObject, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + if err != nil { + return nil, diags.Append(err) + } + if providerSchema == nil { + return nil, diags.Append(fmt.Errorf("provider schema not available for %s", n.Addr)) + } + + if planned != nil && planned.Action != plans.Read { + // If any other action gets in here then that's always a bug; this + // EvalNode only deals with reading. 
+ diags = diags.Append(fmt.Errorf( + "invalid action %s for %s: only Read is supported (this is a bug in Terraform; please report it!)", + planned.Action, n.Addr, + )) + return nil, diags + } + + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreApply(n.Addr, states.CurrentGen, planned.Action, planned.Before, planned.After) + })) + if diags.HasErrors() { + return nil, diags + } + + config := *n.Config + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource().Resource) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + diags = diags.Append(fmt.Errorf("provider %q does not support data source %q", n.ResolvedProvider, n.Addr.ContainingResource().Resource.Type)) + return nil, diags + } + + forEach, _ := evaluateForEachExpression(config.ForEach, ctx) + keyData := EvalDataForInstanceKey(n.Addr.Resource.Key, forEach) + + configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags + } + + newVal, readDiags := n.readDataSource(ctx, configVal) + diags = diags.Append(readDiags) + if diags.HasErrors() { + return nil, diags + } + + state := &states.ResourceInstanceObject{ + Value: newVal, + Status: states.ObjectReady, + } + + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostApply(n.Addr, states.CurrentGen, newVal, diags.Err()) + })) + + return state, diags +} + +// evalApplyProvisioners determines if provisioners need to be run, and if so +// executes the provisioners for a resource and returns an updated error if +// provisioning fails. 
+func (n *NodeAbstractResourceInstance) evalApplyProvisioners(ctx EvalContext, state *states.ResourceInstanceObject, createNew bool, when configs.ProvisionerWhen) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if state == nil { + log.Printf("[TRACE] evalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr) + return nil + } + if when == configs.ProvisionerWhenCreate && !createNew { + // If we're not creating a new resource, then don't run provisioners + log.Printf("[TRACE] evalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr) + return nil + } + if state.Status == states.ObjectTainted { + // No point in provisioning an object that is already tainted, since + // it's going to get recreated on the next apply anyway. + log.Printf("[TRACE] evalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr) + return nil + } + + provs := filterProvisioners(n.Config, when) + if len(provs) == 0 { + // We have no provisioners, so don't do anything + return nil + } + + // Call pre hook + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreProvisionInstance(n.Addr, state.Value) + })) + if diags.HasErrors() { + return diags + } + + // If there are no errors, then we append it to our output error + // if we have one, otherwise we just output it. + err := n.applyProvisioners(ctx, state, when, provs) + if err != nil { + diags = diags.Append(err) + log.Printf("[TRACE] evalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", n.Addr) + return diags + } + + // Call post hook + return diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostProvisionInstance(n.Addr, state.Value) + })) +} + +// filterProvisioners filters the provisioners on the resource to only +// the provisioners specified by the "when" option. 
+func filterProvisioners(config *configs.Resource, when configs.ProvisionerWhen) []*configs.Provisioner { + // Fast path the zero case + if config == nil || config.Managed == nil { + return nil + } + + if len(config.Managed.Provisioners) == 0 { + return nil + } + + result := make([]*configs.Provisioner, 0, len(config.Managed.Provisioners)) + for _, p := range config.Managed.Provisioners { + if p.When == when { + result = append(result, p) + } + } + + return result +} + +// applyProvisioners executes the provisioners for a resource. +func (n *NodeAbstractResourceInstance) applyProvisioners(ctx EvalContext, state *states.ResourceInstanceObject, when configs.ProvisionerWhen, provs []*configs.Provisioner) error { + var diags tfdiags.Diagnostics + + // this self is only used for destroy provisioner evaluation, and must + // refer to the last known value of the resource. + self := state.Value + + var evalScope func(EvalContext, hcl.Body, cty.Value, *configschema.Block) (cty.Value, tfdiags.Diagnostics) + switch when { + case configs.ProvisionerWhenDestroy: + evalScope = n.evalDestroyProvisionerConfig + default: + evalScope = n.evalProvisionerConfig + } + + // If there's a connection block defined directly inside the resource block + // then it'll serve as a base connection configuration for all of the + // provisioners. 
+ var baseConn hcl.Body + if n.Config.Managed != nil && n.Config.Managed.Connection != nil { + baseConn = n.Config.Managed.Connection.Config + } + + for _, prov := range provs { + log.Printf("[TRACE] applyProvisioners: provisioning %s with %q", n.Addr, prov.Type) + + // Get the provisioner + provisioner, err := ctx.Provisioner(prov.Type) + if err != nil { + diags = diags.Append(err) + return diags.Err() + } + + schema := ctx.ProvisionerSchema(prov.Type) + + config, configDiags := evalScope(ctx, prov.Config, self, schema) + diags = diags.Append(configDiags) + if diags.HasErrors() { + return diags.Err() + } + + // If the provisioner block contains a connection block of its own then + // it can override the base connection configuration, if any. + var localConn hcl.Body + if prov.Connection != nil { + localConn = prov.Connection.Config + } + + var connBody hcl.Body + switch { + case baseConn != nil && localConn != nil: + // Our standard merging logic applies here, similar to what we do + // with _override.tf configuration files: arguments from the + // base connection block will be masked by any arguments of the + // same name in the local connection block. 
+ connBody = configs.MergeBodies(baseConn, localConn) + case baseConn != nil: + connBody = baseConn + case localConn != nil: + connBody = localConn + } + + // start with an empty connInfo + connInfo := cty.NullVal(connectionBlockSupersetSchema.ImpliedType()) + + if connBody != nil { + var connInfoDiags tfdiags.Diagnostics + connInfo, connInfoDiags = evalScope(ctx, connBody, self, connectionBlockSupersetSchema) + diags = diags.Append(connInfoDiags) + if diags.HasErrors() { + return diags.Err() + } + } + + { + // Call pre hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreProvisionInstanceStep(n.Addr, prov.Type) + }) + if err != nil { + return err + } + } + + // The output function + outputFn := func(msg string) { + ctx.Hook(func(h Hook) (HookAction, error) { + h.ProvisionOutput(n.Addr, prov.Type, msg) + return HookActionContinue, nil + }) + } + + // If our config or connection info contains any marked values, ensure + // those are stripped out before sending to the provisioner. Unlike + // resources, we have no need to capture the marked paths and reapply + // later. + unmarkedConfig, configMarks := config.UnmarkDeep() + unmarkedConnInfo, _ := connInfo.UnmarkDeep() + + // Marks on the config might result in leaking sensitive values through + // provisioner logging, so we conservatively suppress all output in + // this case. This should not apply to connection info values, which + // provisioners ought not to be logging anyway. 
+ if len(configMarks) > 0 { + outputFn = func(msg string) { + ctx.Hook(func(h Hook) (HookAction, error) { + h.ProvisionOutput(n.Addr, prov.Type, "(output suppressed due to sensitive value in config)") + return HookActionContinue, nil + }) + } + } + + output := CallbackUIOutput{OutputFn: outputFn} + resp := provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: unmarkedConfig, + Connection: unmarkedConnInfo, + UIOutput: &output, + }) + applyDiags := resp.Diagnostics.InConfigBody(prov.Config) + + // Call post hook + hookErr := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostProvisionInstanceStep(n.Addr, prov.Type, applyDiags.Err()) + }) + + switch prov.OnFailure { + case configs.ProvisionerOnFailureContinue: + if applyDiags.HasErrors() { + log.Printf("[WARN] Errors while provisioning %s with %q, but continuing as requested in configuration", n.Addr, prov.Type) + } else { + // Maybe there are warnings that we still want to see + diags = diags.Append(applyDiags) + } + default: + diags = diags.Append(applyDiags) + if applyDiags.HasErrors() { + log.Printf("[WARN] Errors while provisioning %s with %q, so aborting", n.Addr, prov.Type) + return diags.Err() + } + } + + // Deal with the hook + if hookErr != nil { + return hookErr + } + } + + // we have to drop warning-only diagnostics for now + if diags.HasErrors() { + return diags.ErrWithWarnings() + } + + // log any warnings since we can't return them + if e := diags.ErrWithWarnings(); e != nil { + log.Printf("[WARN] applyProvisioners %s: %v", n.Addr, e) + } + + return nil +} + +func (n *NodeAbstractResourceInstance) evalProvisionerConfig(ctx EvalContext, body hcl.Body, self cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + forEach, forEachDiags := evaluateForEachExpression(n.Config.ForEach, ctx) + diags = diags.Append(forEachDiags) + + keyData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) + + config, 
_, configDiags := ctx.EvaluateBlock(body, schema, n.ResourceInstanceAddr().Resource, keyData) + diags = diags.Append(configDiags) + + return config, diags +} + +// during destroy a provisioner can only evaluate within the scope of the parent resource +func (n *NodeAbstractResourceInstance) evalDestroyProvisionerConfig(ctx EvalContext, body hcl.Body, self cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // For a destroy-time provisioner forEach is intentionally nil here, + // which EvalDataForInstanceKey responds to by not populating EachValue + // in its result. That's okay because each.value is prohibited for + // destroy-time provisioners. + keyData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, nil) + + evalScope := ctx.EvaluationScope(n.ResourceInstanceAddr().Resource, keyData) + config, evalDiags := evalScope.EvalSelfBlock(body, self, schema, keyData) + diags = diags.Append(evalDiags) + + return config, diags +} + +// apply accepts an applyConfig, instead of using n.Config, so destroy plans can +// send a nil config. Most of the errors generated in apply are returned as +// diagnostics, but if provider.ApplyResourceChange itself fails, that error is +// returned as an error and nil diags are returned. 
+func (n *NodeAbstractResourceInstance) apply( + ctx EvalContext, + state *states.ResourceInstanceObject, + change *plans.ResourceInstanceChange, + applyConfig *configs.Resource, + createBeforeDestroy bool) (*states.ResourceInstanceObject, tfdiags.Diagnostics) { + + var diags tfdiags.Diagnostics + if state == nil { + state = &states.ResourceInstanceObject{} + } + + provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + if err != nil { + return nil, diags.Append(err) + } + schema, _ := providerSchema.SchemaForResourceType(n.Addr.Resource.Resource.Mode, n.Addr.Resource.Resource.Type) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + diags = diags.Append(fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Resource.Type)) + return nil, diags + } + + log.Printf("[INFO] Starting apply for %s", n.Addr) + + configVal := cty.NullVal(cty.DynamicPseudoType) + if applyConfig != nil { + var configDiags tfdiags.Diagnostics + forEach, _ := evaluateForEachExpression(applyConfig.ForEach, ctx) + keyData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) + configVal, _, configDiags = ctx.EvaluateBlock(applyConfig.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags + } + } + + if !configVal.IsWhollyKnown() { + diags = diags.Append(fmt.Errorf( + "configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)", + n.Addr, + )) + return nil, diags + } + + metaConfigVal, metaDiags := n.providerMetas(ctx) + diags = diags.Append(metaDiags) + if diags.HasErrors() { + return nil, diags + } + + log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr, change.Action) + + // If our config, Before or After value contain any marked values, + // ensure those are stripped out before sending + // this to the provider + unmarkedConfigVal, _ := configVal.UnmarkDeep() 
+ unmarkedBefore, beforePaths := change.Before.UnmarkDeepWithPaths() + unmarkedAfter, afterPaths := change.After.UnmarkDeepWithPaths() + + // If we have an Update action, our before and after values are equal, + // and only differ on their sensitivity, the newVal is the after val + // and we should not communicate with the provider. We do need to update + // the state with this new value, to ensure the sensitivity change is + // persisted. + eqV := unmarkedBefore.Equals(unmarkedAfter) + eq := eqV.IsKnown() && eqV.True() + if change.Action == plans.Update && eq && !marksEqual(beforePaths, afterPaths) { + // Copy the previous state, changing only the value + newState := &states.ResourceInstanceObject{ + CreateBeforeDestroy: state.CreateBeforeDestroy, + Dependencies: state.Dependencies, + Private: state.Private, + Status: state.Status, + Value: change.After, + } + return newState, diags + } + + resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{ + TypeName: n.Addr.Resource.Resource.Type, + PriorState: unmarkedBefore, + Config: unmarkedConfigVal, + PlannedState: unmarkedAfter, + PlannedPrivate: change.Private, + ProviderMeta: metaConfigVal, + }) + applyDiags := resp.Diagnostics + if applyConfig != nil { + applyDiags = applyDiags.InConfigBody(applyConfig.Config) + } + diags = diags.Append(applyDiags) + + // Even if there are errors in the returned diagnostics, the provider may + // have returned a _partial_ state for an object that already exists but + // failed to fully configure, and so the remaining code must always run + // to completion but must be defensive against the new value being + // incomplete. 
+ newVal := resp.NewState + + // If we have paths to mark, mark those on this new value + if len(afterPaths) > 0 { + newVal = newVal.MarkWithPaths(afterPaths) + } + + if newVal == cty.NilVal { + // Providers are supposed to return a partial new value even when errors + // occur, but sometimes they don't and so in that case we'll patch that up + // by just using the prior state, so we'll at least keep track of the + // object for the user to retry. + newVal = change.Before + + // As a special case, we'll set the new value to null if it looks like + // we were trying to execute a delete, because the provider in this case + // probably left the newVal unset intending it to be interpreted as "null". + if change.After.IsNull() { + newVal = cty.NullVal(schema.ImpliedType()) + } + + if !diags.HasErrors() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced an invalid nil value after apply for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ResolvedProvider.String(), n.Addr.String(), + ), + )) + } + } + + var conformDiags tfdiags.Diagnostics + for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { + conformDiags = conformDiags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced an invalid value after apply for %s. The result cannot not be saved in the Terraform state.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ResolvedProvider.String(), tfdiags.FormatErrorPrefixed(err, n.Addr.String()), + ), + )) + } + diags = diags.Append(conformDiags) + if conformDiags.HasErrors() { + // Bail early in this particular case, because an object that doesn't + // conform to the schema can't be saved in the state anyway -- the + // serializer will reject it. 
+ return nil, diags + } + + // After this point we have a type-conforming result object and so we + // must always run to completion to ensure it can be saved. If n.Error + // is set then we must not return a non-nil error, in order to allow + // evaluation to continue to a later point where our state object will + // be saved. + + // By this point there must not be any unknown values remaining in our + // object, because we've applied the change and we can't save unknowns + // in our persistent state. If any are present then we will indicate an + // error (which is always a bug in the provider) but we will also replace + // them with nulls so that we can successfully save the portions of the + // returned value that are known. + if !newVal.IsWhollyKnown() { + // To generate better error messages, we'll go for a walk through the + // value and make a separate diagnostic for each unknown value we + // find. + cty.Walk(newVal, func(path cty.Path, val cty.Value) (bool, error) { + if !val.IsKnown() { + pathStr := tfdiags.FormatCtyPath(path) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After the apply operation, the provider still indicated an unknown value for %s%s. All values must be known after apply, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save the other known object values in the state.", + n.Addr, pathStr, + ), + )) + } + return true, nil + }) + + // NOTE: This operation can potentially be lossy if there are multiple + // elements in a set that differ only by unknown values: after + // replacing with null these will be merged together into a single set + // element. Since we can only get here in the presence of a provider + // bug, we accept this because storing a result here is always a + // best-effort sort of thing. 
+ newVal = cty.UnknownAsNull(newVal) + } + + if change.Action != plans.Delete && !diags.HasErrors() { + // Only values that were marked as unknown in the planned value are allowed + // to change during the apply operation. (We do this after the unknown-ness + // check above so that we also catch anything that became unknown after + // being known during plan.) + // + // If we are returning other errors anyway then we'll give this + // a pass since the other errors are usually the explanation for + // this one and so it's more helpful to let the user focus on the + // root cause rather than distract with this extra problem. + if errs := objchange.AssertObjectCompatible(schema, change.After, newVal); len(errs) > 0 { + if resp.LegacyTypeSystem { + // The shimming of the old type system in the legacy SDK is not precise + // enough to pass this consistency check, so we'll give it a pass here, + // but we will generate a warning about it so that we are more likely + // to notice in the logs if an inconsistency beyond the type system + // leads to a downstream provider failure. + var buf strings.Builder + fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ResolvedProvider.String(), n.Addr) + for _, err := range errs { + fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) + } + log.Print(buf.String()) + + // The sort of inconsistency we won't catch here is if a known value + // in the plan is changed during apply. That can cause downstream + // problems because a dependent resource would make its own plan based + // on the planned value, and thus get a different result during the + // apply phase. This will usually lead to a "Provider produced invalid plan" + // error that incorrectly blames the downstream resource for the change. 
+ + } else { + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent result after apply", + fmt.Sprintf( + "When applying changes to %s, provider %q produced an unexpected new value: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.Addr, n.ResolvedProvider.String(), tfdiags.FormatError(err), + ), + )) + } + } + } + } + + // If a provider returns a null or non-null object at the wrong time then + // we still want to save that but it often causes some confusing behaviors + // where it seems like Terraform is failing to take any action at all, + // so we'll generate some errors to draw attention to it. + if !diags.HasErrors() { + if change.Action == plans.Delete && !newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After applying a %s plan, the provider returned a non-null object for %s. Destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save this errant object in the state for debugging and recovery.", + change.Action, n.Addr, + ), + )) + } + if change.Action != plans.Delete && newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After applying a %s plan, the provider returned a null object for %s. Only destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository.", + change.Action, n.Addr, + ), + )) + } + } + + switch { + case diags.HasErrors() && newVal.IsNull(): + // Sometimes providers return a null value when an operation fails for + // some reason, but we'd rather keep the prior state so that the error + // can be corrected on a subsequent run. 
We must only do this for null + // new value though, or else we may discard partial updates the + // provider was able to complete. Otherwise, we'll continue using the + // prior state as the new value, making this effectively a no-op. If + // the item really _has_ been deleted then our next refresh will detect + // that and fix it up. + return state.DeepCopy(), diags + + case diags.HasErrors() && !newVal.IsNull(): + // if we have an error, make sure we restore the object status in the new state + newState := &states.ResourceInstanceObject{ + Status: state.Status, + Value: newVal, + Private: resp.Private, + CreateBeforeDestroy: createBeforeDestroy, + } + return newState, diags + + case !newVal.IsNull(): + // Non error case with a new state + newState := &states.ResourceInstanceObject{ + Status: states.ObjectReady, + Value: newVal, + Private: resp.Private, + CreateBeforeDestroy: createBeforeDestroy, + } + return newState, diags + + default: + // Non error case, were the object was deleted + return nil, diags + } +} diff --git a/terraform/node_resource_abstract_instance_test.go b/terraform/node_resource_abstract_instance_test.go index 919e4bac9..ff00147c8 100644 --- a/terraform/node_resource_abstract_instance_test.go +++ b/terraform/node_resource_abstract_instance_test.go @@ -6,6 +6,9 @@ import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/states" + "github.com/zclconf/go-cty/cty" ) func TestNodeAbstractResourceInstanceProvider(t *testing.T) { @@ -109,3 +112,47 @@ func TestNodeAbstractResourceInstanceProvider(t *testing.T) { }) } } + +func TestNodeAbstractResourceInstance_WriteResourceInstanceState(t *testing.T) { + state := states.NewState() + ctx := new(MockEvalContext) + ctx.StateState = state.SyncWrapper() + ctx.PathPath = addrs.RootModuleInstance + + mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ + 
Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Optional: true, + }, + }, + }) + + obj := &states.ResourceInstanceObject{ + Value: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-abc123"), + }), + Status: states.ObjectReady, + } + + node := &NodeAbstractResourceInstance{ + Addr: mustResourceInstanceAddr("aws_instance.foo"), + // instanceState: obj, + NodeAbstractResource: NodeAbstractResource{ + ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + }, + } + ctx.ProviderProvider = mockProvider + ctx.ProviderSchemaSchema = mockProvider.ProviderSchema() + + err := node.writeResourceInstanceState(ctx, obj, nil, workingState) + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + + checkStateString(t, state, ` +aws_instance.foo: + ID = i-abc123 + provider = provider["registry.terraform.io/hashicorp/aws"] + `) +} diff --git a/terraform/node_resource_abstract_test.go b/terraform/node_resource_abstract_test.go index 6faafe1e8..a0075889d 100644 --- a/terraform/node_resource_abstract_test.go +++ b/terraform/node_resource_abstract_test.go @@ -6,6 +6,10 @@ import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/zclconf/go-cty/cty" ) func TestNodeAbstractResourceProvider(t *testing.T) { @@ -107,3 +111,128 @@ func TestNodeAbstractResourceProvider(t *testing.T) { }) } } + +func TestNodeAbstractResource_ReadResourceInstanceState(t *testing.T) { + mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Optional: true, + }, + }, + }) + + tests := map[string]struct { + State *states.State + Node *NodeAbstractResource + ExpectedInstanceId string + }{ + "ReadState gets primary instance 
state": { + State: states.BuildState(func(s *states.SyncState) { + providerAddr := addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + } + oneAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "bar", + }.Absolute(addrs.RootModuleInstance) + s.SetResourceProvider(oneAddr, providerAddr) + s.SetResourceInstanceCurrent(oneAddr.Instance(addrs.NoKey), &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-abc123"}`), + }, providerAddr) + }), + Node: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("aws_instance.bar"), + ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + }, + ExpectedInstanceId: "i-abc123", + }, + } + + for k, test := range tests { + t.Run(k, func(t *testing.T) { + ctx := new(MockEvalContext) + ctx.StateState = test.State.SyncWrapper() + ctx.PathPath = addrs.RootModuleInstance + ctx.ProviderSchemaSchema = mockProvider.ProviderSchema() + ctx.ProviderProvider = providers.Interface(mockProvider) + + got, err := test.Node.readResourceInstanceState(ctx, test.Node.Addr.Resource.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)) + if err != nil { + t.Fatalf("[%s] Got err: %#v", k, err.Error()) + } + + expected := test.ExpectedInstanceId + + if !(got != nil && got.Value.GetAttr("id") == cty.StringVal(expected)) { + t.Fatalf("[%s] Expected output with ID %#v, got: %#v", k, expected, got) + } + }) + } +} + +func TestNodeAbstractResource_ReadResourceInstanceStateDeposed(t *testing.T) { + mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Optional: true, + }, + }, + }) + + tests := map[string]struct { + State *states.State + Node *NodeAbstractResource + ExpectedInstanceId string + }{ + "ReadStateDeposed gets deposed instance": { + State: states.BuildState(func(s 
*states.SyncState) { + providerAddr := addrs.AbsProviderConfig{ + Provider: addrs.NewDefaultProvider("aws"), + Module: addrs.RootModule, + } + oneAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "bar", + }.Absolute(addrs.RootModuleInstance) + s.SetResourceProvider(oneAddr, providerAddr) + s.SetResourceInstanceDeposed(oneAddr.Instance(addrs.NoKey), states.DeposedKey("00000001"), &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"i-abc123"}`), + }, providerAddr) + }), + Node: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("aws_instance.bar"), + ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + }, + ExpectedInstanceId: "i-abc123", + }, + } + for k, test := range tests { + t.Run(k, func(t *testing.T) { + ctx := new(MockEvalContext) + ctx.StateState = test.State.SyncWrapper() + ctx.PathPath = addrs.RootModuleInstance + ctx.ProviderSchemaSchema = mockProvider.ProviderSchema() + ctx.ProviderProvider = providers.Interface(mockProvider) + + key := states.DeposedKey("00000001") // shim from legacy state assigns 0th deposed index this key + + got, err := test.Node.readResourceInstanceStateDeposed(ctx, test.Node.Addr.Resource.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), key) + if err != nil { + t.Fatalf("[%s] Got err: %#v", k, err.Error()) + } + + expected := test.ExpectedInstanceId + + if !(got != nil && got.Value.GetAttr("id") == cty.StringVal(expected)) { + t.Fatalf("[%s] Expected output with ID %#v, got: %#v", k, expected, got) + } + }) + } +} diff --git a/terraform/node_resource_apply.go b/terraform/node_resource_apply.go index ff09ba614..7c23483b7 100644 --- a/terraform/node_resource_apply.go +++ b/terraform/node_resource_apply.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/dag" "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/tfdiags" ) // 
nodeExpandApplyableResource handles the first layer of resource @@ -42,10 +43,7 @@ func (n *nodeExpandApplyableResource) DynamicExpand(ctx EvalContext) (*Graph, er expander := ctx.InstanceExpander() moduleInstances := expander.ExpandModule(n.Addr.Module) - var resources []addrs.AbsResource for _, module := range moduleInstances { - resAddr := n.Addr.Resource.Absolute(module) - resources = append(resources, resAddr) g.Add(&NodeApplyableResource{ NodeAbstractResource: n.NodeAbstractResource, Addr: n.Addr.Resource.Absolute(module), @@ -102,13 +100,12 @@ func (n *NodeApplyableResource) References() []*addrs.Reference { } // GraphNodeExecutable -func (n *NodeApplyableResource) Execute(ctx EvalContext, op walkOperation) error { +func (n *NodeApplyableResource) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { if n.Config == nil { // Nothing to do, then. log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", n.Name()) return nil } - err := n.writeResourceState(ctx, n.Addr) - return err + return n.writeResourceState(ctx, n.Addr) } diff --git a/terraform/node_resource_apply_instance.go b/terraform/node_resource_apply_instance.go index a1d35b81a..262a0e96a 100644 --- a/terraform/node_resource_apply_instance.go +++ b/terraform/node_resource_apply_instance.go @@ -2,10 +2,12 @@ package terraform import ( "fmt" + "log" "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/objchange" "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/tfdiags" ) @@ -101,7 +103,7 @@ func (n *NodeApplyableResourceInstance) AttachDependencies(deps []addrs.ConfigRe } // GraphNodeExecutable -func (n *NodeApplyableResourceInstance) Execute(ctx EvalContext, op walkOperation) error { +func (n *NodeApplyableResourceInstance) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { addr := n.ResourceInstanceAddr() if n.Config == 
nil { @@ -110,7 +112,6 @@ func (n *NodeApplyableResourceInstance) Execute(ctx EvalContext, op walkOperatio // https://github.com/hashicorp/terraform/issues/21258 // To avoid an outright crash here, we'll instead return an explicit // error. - var diags tfdiags.Diagnostics diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, "Resource node has no configuration attached", @@ -119,7 +120,7 @@ func (n *NodeApplyableResourceInstance) Execute(ctx EvalContext, op walkOperatio addr, ), )) - return diags.Err() + return diags } // Eval info is different depending on what kind of resource this is @@ -133,93 +134,69 @@ func (n *NodeApplyableResourceInstance) Execute(ctx EvalContext, op walkOperatio } } -func (n *NodeApplyableResourceInstance) dataResourceExecute(ctx EvalContext) error { - addr := n.ResourceInstanceAddr().Resource - - provider, providerSchema, err := GetProvider(ctx, n.ResolvedProvider) - if err != nil { - return err +func (n *NodeApplyableResourceInstance) dataResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { + _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + diags = diags.Append(err) + if diags.HasErrors() { + return diags } change, err := n.readDiff(ctx, providerSchema) - if err != nil { - return err + diags = diags.Append(err) + if diags.HasErrors() { + return diags } // Stop early if we don't actually have a diff if change == nil { - return EvalEarlyExitError{} + return diags } - // In this particular call to EvalReadData we include our planned + // In this particular call to applyDataSource we include our planned // change, which signals that we expect this read to complete fully // with no unknown values; it'll produce an error if not. 
- var state *states.ResourceInstanceObject - readDataApply := &evalReadDataApply{ - evalReadData{ - Addr: addr, - Config: n.Config, - Planned: &change, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - State: &state, - }, - } - _, err = readDataApply.Eval(ctx) - if err != nil { - return err + state, applyDiags := n.applyDataSource(ctx, change) + diags = diags.Append(applyDiags) + if diags.HasErrors() { + return diags } - writeState := &EvalWriteState{ - Addr: addr, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - } - _, err = writeState.Eval(ctx) - if err != nil { - return err + // We don't write dependencies for datasources + diags = diags.Append(n.writeResourceInstanceState(ctx, state, nil, workingState)) + if diags.HasErrors() { + return diags } - writeDiff := &EvalWriteDiff{ - Addr: addr, - ProviderSchema: &providerSchema, - Change: nil, - } - _, err = writeDiff.Eval(ctx) - if err != nil { - return err - } + diags = diags.Append(n.writeChange(ctx, nil, "")) - UpdateStateHook(ctx) - return nil + diags = diags.Append(updateStateHook(ctx)) + return diags } -func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext) error { +func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { // Declare a bunch of variables that are used for state during // evaluation. Most of this are written to by-address below. 
var state *states.ResourceInstanceObject - var createNew bool var createBeforeDestroyEnabled bool var deposedKey states.DeposedKey addr := n.ResourceInstanceAddr().Resource - provider, providerSchema, err := GetProvider(ctx, n.ResolvedProvider) - if err != nil { - return err + _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + diags = diags.Append(err) + if diags.HasErrors() { + return diags } // Get the saved diff for apply diffApply, err := n.readDiff(ctx, providerSchema) - if err != nil { - return err + diags = diags.Append(err) + if diags.HasErrors() { + return diags } // We don't want to do any destroys // (these are handled by NodeDestroyResourceInstance instead) if diffApply == nil || diffApply.Action == plans.Delete { - return EvalEarlyExitError{} + return diags } destroy := (diffApply.Action == plans.Delete || diffApply.Action.IsReplace()) @@ -231,220 +208,221 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext) } if createBeforeDestroyEnabled { - deposeState := &EvalDeposeState{ - Addr: addr, - ForceKey: n.PreallocatedDeposedKey, - OutputKey: &deposedKey, - } - _, err = deposeState.Eval(ctx) - if err != nil { - return err + state := ctx.State() + if n.PreallocatedDeposedKey == states.NotDeposed { + deposedKey = state.DeposeResourceInstanceObject(n.Addr) + } else { + deposedKey = n.PreallocatedDeposedKey + state.DeposeResourceInstanceObjectForceKey(n.Addr, deposedKey) } + log.Printf("[TRACE] managedResourceExecute: prior object for %s now deposed with key %s", n.Addr, deposedKey) } - readState := &EvalReadState{ - Addr: addr, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - } - _, err = readState.Eval(ctx) - if err != nil { - return err + state, err = n.readResourceInstanceState(ctx, n.ResourceInstanceAddr()) + diags = diags.Append(err) + if diags.HasErrors() { + return diags } // Get the saved diff diff, err := n.readDiff(ctx, providerSchema) - if err != nil { - return err + diags 
= diags.Append(err) + if diags.HasErrors() { + return diags } // Make a new diff, in case we've learned new values in the state // during apply which we can now incorporate. - evalDiff := &EvalDiff{ - Addr: addr, - Config: n.Config, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - State: &state, - PreviousDiff: &diff, - OutputChange: &diffApply, - OutputState: &state, - } - _, err = evalDiff.Eval(ctx) - if err != nil { - return err + diffApply, _, planDiags := n.plan(ctx, diff, state, false) + diags = diags.Append(planDiags) + if diags.HasErrors() { + return diags } // Compare the diffs - checkPlannedChange := &EvalCheckPlannedChange{ - Addr: addr, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Planned: &diff, - Actual: &diffApply, - } - _, err = checkPlannedChange.Eval(ctx) - if err != nil { - return err + diags = diags.Append(n.checkPlannedChange(ctx, diff, diffApply, providerSchema)) + if diags.HasErrors() { + return diags } - readState = &EvalReadState{ - Addr: addr, - Provider: &provider, - ProviderSchema: &providerSchema, - - Output: &state, - } - _, err = readState.Eval(ctx) - if err != nil { - return err + state, err = n.readResourceInstanceState(ctx, n.ResourceInstanceAddr()) + diags = diags.Append(err) + if diags.HasErrors() { + return diags } - reduceDiff := &EvalReduceDiff{ - Addr: addr, - InChange: &diffApply, - Destroy: false, - OutChange: &diffApply, - } - _, err = reduceDiff.Eval(ctx) - if err != nil { - return err - } - - // EvalReduceDiff may have simplified our planned change + diffApply = reducePlan(addr, diffApply, false) + // reducePlan may have simplified our planned change // into a NoOp if it only requires destroying, since destroying // is handled by NodeDestroyResourceInstance. 
if diffApply == nil || diffApply.Action == plans.NoOp { - return EvalEarlyExitError{} + return diags } - evalApplyPre := &EvalApplyPre{ - Addr: addr, - State: &state, - Change: &diffApply, - } - _, err = evalApplyPre.Eval(ctx) - if err != nil { - return err + diags = diags.Append(n.preApplyHook(ctx, diffApply)) + if diags.HasErrors() { + return diags } - var applyError error - evalApply := &EvalApply{ - Addr: addr, - Config: n.Config, - State: &state, - Change: &diffApply, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - Output: &state, - Error: &applyError, - CreateNew: &createNew, - CreateBeforeDestroy: n.CreateBeforeDestroy(), - } - _, err = evalApply.Eval(ctx) - if err != nil { - return err - } + state, applyDiags := n.apply(ctx, state, diffApply, n.Config, n.CreateBeforeDestroy()) + diags = diags.Append(applyDiags) // We clear the change out here so that future nodes don't see a change // that is already complete. 
- writeDiff := &EvalWriteDiff{ - Addr: addr, - ProviderSchema: &providerSchema, - Change: nil, - } - _, err = writeDiff.Eval(ctx) + err = n.writeChange(ctx, nil, "") if err != nil { - return err + return diags.Append(err) } - evalMaybeTainted := &EvalMaybeTainted{ - Addr: addr, - State: &state, - Change: &diffApply, - Error: &applyError, - } - _, err = evalMaybeTainted.Eval(ctx) + state = maybeTainted(addr.Absolute(ctx.Path()), state, diffApply, diags.Err()) + + err = n.writeResourceInstanceState(ctx, state, n.Dependencies, workingState) if err != nil { - return err + return diags.Append(err) } - writeState := &EvalWriteState{ - Addr: addr, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - Dependencies: &n.Dependencies, - } - _, err = writeState.Eval(ctx) + // Run Provisioners + createNew := (diffApply.Action == plans.Create || diffApply.Action.IsReplace()) + applyProvisionersDiags := n.evalApplyProvisioners(ctx, state, createNew, configs.ProvisionerWhenCreate) + // the provisioner errors count as port of the apply error, so we can bundle the diags + diags = diags.Append(applyProvisionersDiags) + + state = maybeTainted(addr.Absolute(ctx.Path()), state, diffApply, diags.Err()) + + err = n.writeResourceInstanceState(ctx, state, n.Dependencies, workingState) if err != nil { - return err + return diags.Append(err) } - applyProvisioners := &EvalApplyProvisioners{ - Addr: addr, - State: &state, // EvalApplyProvisioners will skip if already tainted - ResourceConfig: n.Config, - CreateNew: &createNew, - Error: &applyError, - When: configs.ProvisionerWhenCreate, - } - _, err = applyProvisioners.Eval(ctx) - if err != nil { - return err - } - - evalMaybeTainted = &EvalMaybeTainted{ - Addr: addr, - State: &state, - Change: &diffApply, - Error: &applyError, - } - _, err = evalMaybeTainted.Eval(ctx) - if err != nil { - return err - } - - writeState = &EvalWriteState{ - Addr: addr, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: 
&providerSchema, - State: &state, - Dependencies: &n.Dependencies, - } - _, err = writeState.Eval(ctx) - if err != nil { - return err - } - - if createBeforeDestroyEnabled && applyError != nil { - maybeRestoreDesposedObject := &EvalMaybeRestoreDeposedObject{ - Addr: addr, - PlannedChange: &diffApply, - Key: &deposedKey, - } - _, err := maybeRestoreDesposedObject.Eval(ctx) - if err != nil { - return err + if createBeforeDestroyEnabled && diags.HasErrors() { + if deposedKey == states.NotDeposed { + // This should never happen, and so it always indicates a bug. + // We should evaluate this node only if we've previously deposed + // an object as part of the same operation. + if diffApply != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Attempt to restore non-existent deposed object", + fmt.Sprintf( + "Terraform has encountered a bug where it would need to restore a deposed object for %s without knowing a deposed object key for that object. This occurred during a %s action. This is a bug in Terraform; please report it!", + addr, diffApply.Action, + ), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Attempt to restore non-existent deposed object", + fmt.Sprintf( + "Terraform has encountered a bug where it would need to restore a deposed object for %s without knowing a deposed object key for that object. 
This is a bug in Terraform; please report it!", + addr, + ), + )) + } + } else { + restored := ctx.State().MaybeRestoreResourceInstanceDeposed(addr.Absolute(ctx.Path()), deposedKey) + if restored { + log.Printf("[TRACE] managedResourceExecute: %s deposed object %s was restored as the current object", addr, deposedKey) + } else { + log.Printf("[TRACE] managedResourceExecute: %s deposed object %s remains deposed", addr, deposedKey) + } } } - applyPost := &EvalApplyPost{ - Addr: addr, - State: &state, - Error: &applyError, - } - _, err = applyPost.Eval(ctx) - if err != nil { - return err - } - - UpdateStateHook(ctx) - return nil + diags = diags.Append(n.postApplyHook(ctx, state, diags.Err())) + diags = diags.Append(updateStateHook(ctx)) + return diags +} + +// checkPlannedChange produces errors if the _actual_ expected value is not +// compatible with what was recorded in the plan. +// +// Errors here are most often indicative of a bug in the provider, so our error +// messages will report with that in mind. It's also possible that there's a bug +// in Terraform's Core's own "proposed new value" code in EvalDiff. 
+func (n *NodeApplyableResourceInstance) checkPlannedChange(ctx EvalContext, plannedChange, actualChange *plans.ResourceInstanceChange, providerSchema *ProviderSchema) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + addr := n.ResourceInstanceAddr().Resource + + schema, _ := providerSchema.SchemaForResourceAddr(addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + diags = diags.Append(fmt.Errorf("provider does not support %q", addr.Resource.Type)) + return diags + } + + absAddr := addr.Absolute(ctx.Path()) + + log.Printf("[TRACE] checkPlannedChange: Verifying that actual change (action %s) matches planned change (action %s)", actualChange.Action, plannedChange.Action) + + if plannedChange.Action != actualChange.Action { + switch { + case plannedChange.Action == plans.Update && actualChange.Action == plans.NoOp: + // It's okay for an update to become a NoOp once we've filled in + // all of the unknown values, since the final values might actually + // match what was there before after all. 
+ log.Printf("[DEBUG] After incorporating new values learned so far during apply, %s change has become NoOp", absAddr) + + case (plannedChange.Action == plans.CreateThenDelete && actualChange.Action == plans.DeleteThenCreate) || + (plannedChange.Action == plans.DeleteThenCreate && actualChange.Action == plans.CreateThenDelete): + // If the order of replacement changed, then that is a bug in terraform + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Terraform produced inconsistent final plan", + fmt.Sprintf( + "When expanding the plan for %s to include new values learned so far during apply, the planned action changed from %s to %s.\n\nThis is a bug in Terraform and should be reported.", + absAddr, plannedChange.Action, actualChange.Action, + ), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent final plan", + fmt.Sprintf( + "When expanding the plan for %s to include new values learned so far during apply, provider %q changed the planned action from %s to %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + absAddr, n.ResolvedProvider.Provider.String(), + plannedChange.Action, actualChange.Action, + ), + )) + } + } + + errs := objchange.AssertObjectCompatible(schema, plannedChange.After, actualChange.After) + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent final plan", + fmt.Sprintf( + "When expanding the plan for %s to include new values learned so far during apply, provider %q produced an invalid new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + absAddr, n.ResolvedProvider.Provider.String(), tfdiags.FormatError(err), + ), + )) + } + return diags +} + +// maybeTainted takes the resource addr, new value, planned change, and possible +// error from an apply operation and return a new instance object marked 
as +// tainted if it appears that a create operation has failed. +func maybeTainted(addr addrs.AbsResourceInstance, state *states.ResourceInstanceObject, change *plans.ResourceInstanceChange, err error) *states.ResourceInstanceObject { + if state == nil || change == nil || err == nil { + return state + } + if state.Status == states.ObjectTainted { + log.Printf("[TRACE] maybeTainted: %s was already tainted, so nothing to do", addr) + return state + } + if change.Action == plans.Create { + // If there are errors during a _create_ then the object is + // in an undefined state, and so we'll mark it as tainted so + // we can try again on the next run. + // + // We don't do this for other change actions because errors + // during updates will often not change the remote object at all. + // If there _were_ changes prior to the error, it's the provider's + // responsibility to record the effect of those changes in the + // object value it returned. + log.Printf("[TRACE] maybeTainted: %s encountered an error during creation, so it is now marked as tainted", addr) + return state.AsTainted() + } + return state } diff --git a/terraform/node_resource_apply_test.go b/terraform/node_resource_apply_test.go index a841a3563..54b65ec4f 100644 --- a/terraform/node_resource_apply_test.go +++ b/terraform/node_resource_apply_test.go @@ -23,9 +23,9 @@ func TestNodeApplyableResourceExecute(t *testing.T) { }, Addr: mustAbsResourceAddr("test_instance.foo"), } - err := node.Execute(ctx, walkApply) - if err != nil { - t.Fatalf("unexpected error: %s", err.Error()) + diags := node.Execute(ctx, walkApply) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) } if !state.Empty() { t.Fatalf("expected no state, got:\n %s", state.String()) @@ -48,9 +48,9 @@ func TestNodeApplyableResourceExecute(t *testing.T) { }, Addr: mustAbsResourceAddr("test_instance.foo"), } - err := node.Execute(ctx, walkApply) - if err != nil { - t.Fatalf("unexpected error: %s", err.Error()) + diags := 
node.Execute(ctx, walkApply) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) } if state.Empty() { t.Fatal("expected resources in state, got empty state") diff --git a/terraform/node_resource_destroy.go b/terraform/node_resource_destroy.go index 872df3654..fa5ba3dc8 100644 --- a/terraform/node_resource_destroy.go +++ b/terraform/node_resource_destroy.go @@ -5,6 +5,7 @@ import ( "log" "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/tfdiags" "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs" @@ -122,7 +123,7 @@ func (n *NodeDestroyResourceInstance) References() []*addrs.Reference { } // GraphNodeExecutable -func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation) error { +func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { addr := n.ResourceInstanceAddr() // Get our state @@ -134,112 +135,68 @@ func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation) // These vars are updated through pointers at various stages below. 
var changeApply *plans.ResourceInstanceChange var state *states.ResourceInstanceObject - var provisionerErr error - provider, providerSchema, err := GetProvider(ctx, n.ResolvedProvider) - if err != nil { - return err + _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + diags = diags.Append(err) + if diags.HasErrors() { + return diags } changeApply, err = n.readDiff(ctx, providerSchema) - if err != nil { - return err + diags = diags.Append(err) + if diags.HasErrors() { + return diags } - evalReduceDiff := &EvalReduceDiff{ - Addr: addr.Resource, - InChange: &changeApply, - Destroy: true, - OutChange: &changeApply, - } - _, err = evalReduceDiff.Eval(ctx) - if err != nil { - return err - } - - // EvalReduceDiff may have simplified our planned change + changeApply = reducePlan(addr.Resource, changeApply, true) + // reducePlan may have simplified our planned change // into a NoOp if it does not require destroying. if changeApply == nil || changeApply.Action == plans.NoOp { - return EvalEarlyExitError{} + return diags } - state, err = n.ReadResourceInstanceState(ctx, addr) - if err != nil { - return err + state, err = n.readResourceInstanceState(ctx, addr) + diags = diags.Append(err) + if diags.HasErrors() { + return diags } // Exit early if the state object is null after reading the state if state == nil || state.Value.IsNull() { - return EvalEarlyExitError{} + return diags } - evalApplyPre := &EvalApplyPre{ - Addr: addr.Resource, - State: &state, - Change: &changeApply, - } - _, err = evalApplyPre.Eval(ctx) - if err != nil { - return err + diags = diags.Append(n.preApplyHook(ctx, changeApply)) + if diags.HasErrors() { + return diags } // Run destroy provisioners if not tainted if state != nil && state.Status != states.ObjectTainted { - evalApplyProvisioners := &EvalApplyProvisioners{ - Addr: addr.Resource, - State: &state, - ResourceConfig: n.Config, - Error: &provisionerErr, - When: configs.ProvisionerWhenDestroy, - } - _, err := 
evalApplyProvisioners.Eval(ctx) - if err != nil { - return err - } - if provisionerErr != nil { + applyProvisionersDiags := n.evalApplyProvisioners(ctx, state, false, configs.ProvisionerWhenDestroy) + diags = diags.Append(applyProvisionersDiags) + // keep the diags separate from the main set until we handle the cleanup + + if diags.HasErrors() { // If we have a provisioning error, then we just call // the post-apply hook now. - evalApplyPost := &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &provisionerErr, - } - _, err = evalApplyPost.Eval(ctx) - if err != nil { - return err - } + diags = diags.Append(n.postApplyHook(ctx, state, diags.Err())) + return diags } } // Managed resources need to be destroyed, while data sources // are only removed from state. if addr.Resource.Resource.Mode == addrs.ManagedResourceMode { - evalApply := &EvalApply{ - Addr: addr.Resource, - Config: nil, // No configuration because we are destroying - State: &state, - Change: &changeApply, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - Output: &state, - Error: &provisionerErr, - } - _, err = evalApply.Eval(ctx) - if err != nil { - return err - } + // we pass a nil configuration to apply because we are destroying + s, d := n.apply(ctx, state, changeApply, nil, false) + state, diags = s, diags.Append(d) + // we don't return immediately here on error, so that the state can be + // finalized - evalWriteState := &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - } - _, err = evalWriteState.Eval(ctx) + err := n.writeResourceInstanceState(ctx, state, n.Dependencies, workingState) if err != nil { - return err + return diags.Append(err) } } else { log.Printf("[TRACE] NodeDestroyResourceInstance: removing state object for %s", n.Addr) @@ -247,20 +204,8 @@ func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op 
walkOperation) state.SetResourceInstanceCurrent(n.Addr, nil, n.ResolvedProvider) } - evalApplyPost := &EvalApplyPost{ - Addr: addr.Resource, - State: &state, - Error: &provisionerErr, - } - _, err = evalApplyPost.Eval(ctx) - if err != nil { - return err - } - - err = UpdateStateHook(ctx) - if err != nil { - return err - } - - return nil + // create the err value for postApplyHook + diags = diags.Append(n.postApplyHook(ctx, state, diags.Err())) + diags = diags.Append(updateStateHook(ctx)) + return diags } diff --git a/terraform/node_resource_destroy_deposed.go b/terraform/node_resource_destroy_deposed.go index a77cdf476..fc6137ddb 100644 --- a/terraform/node_resource_destroy_deposed.go +++ b/terraform/node_resource_destroy_deposed.go @@ -2,11 +2,13 @@ package terraform import ( "fmt" + "log" "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/dag" "github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // ConcreteResourceInstanceDeposedNodeFunc is a callback type used to convert @@ -63,55 +65,22 @@ func (n *NodePlanDeposedResourceInstanceObject) References() []*addrs.Reference } // GraphNodeEvalable impl. -func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx EvalContext, op walkOperation) error { - addr := n.ResourceInstanceAddr() - - provider, providerSchema, err := GetProvider(ctx, n.ResolvedProvider) - if err != nil { - return err +func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { + // Read the state for the deposed resource instance + state, err := n.readResourceInstanceStateDeposed(ctx, n.Addr, n.DeposedKey) + diags = diags.Append(err) + if diags.HasErrors() { + return diags } - // During the plan walk we always produce a planned destroy change, because - // destroying is the only supported action for deposed objects. 
- var change *plans.ResourceInstanceChange - var state *states.ResourceInstanceObject - - readStateDeposed := &EvalReadStateDeposed{ - Addr: addr.Resource, - Output: &state, - Key: n.DeposedKey, - Provider: &provider, - ProviderSchema: &providerSchema, - } - _, err = readStateDeposed.Eval(ctx) - if err != nil { - return err + change, destroyPlanDiags := n.planDestroy(ctx, state, n.DeposedKey) + diags = diags.Append(destroyPlanDiags) + if diags.HasErrors() { + return diags } - diffDestroy := &EvalDiffDestroy{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - DeposedKey: n.DeposedKey, - State: &state, - Output: &change, - } - _, err = diffDestroy.Eval(ctx) - if err != nil { - return err - } - - writeDiff := &EvalWriteDiff{ - Addr: addr.Resource, - DeposedKey: n.DeposedKey, - ProviderSchema: &providerSchema, - Change: &change, - } - _, err = writeDiff.Eval(ctx) - if err != nil { - return err - } - - return nil + diags = diags.Append(n.writeChange(ctx, change, n.DeposedKey)) + return diags } // NodeDestroyDeposedResourceInstanceObject represents deposed resource @@ -181,97 +150,43 @@ func (n *NodeDestroyDeposedResourceInstanceObject) ModifyCreateBeforeDestroy(v b } // GraphNodeExecutable impl. 
-func (n *NodeDestroyDeposedResourceInstanceObject) Execute(ctx EvalContext, op walkOperation) error { - addr := n.ResourceInstanceAddr().Resource - - var state *states.ResourceInstanceObject +func (n *NodeDestroyDeposedResourceInstanceObject) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { var change *plans.ResourceInstanceChange - var applyError error - provider, providerSchema, err := GetProvider(ctx, n.ResolvedProvider) + // Read the state for the deposed resource instance + state, err := n.readResourceInstanceStateDeposed(ctx, n.Addr, n.DeposedKey) if err != nil { - return err + return diags.Append(err) } - readStateDeposed := &EvalReadStateDeposed{ - Addr: addr, - Output: &state, - Key: n.DeposedKey, - Provider: &provider, - ProviderSchema: &providerSchema, - } - _, err = readStateDeposed.Eval(ctx) - if err != nil { - return err - } - - diffDestroy := &EvalDiffDestroy{ - Addr: addr, - ProviderAddr: n.ResolvedProvider, - State: &state, - Output: &change, - } - _, err = diffDestroy.Eval(ctx) - if err != nil { - return err + change, destroyPlanDiags := n.planDestroy(ctx, state, n.DeposedKey) + diags = diags.Append(destroyPlanDiags) + if diags.HasErrors() { + return diags } // Call pre-apply hook - applyPre := &EvalApplyPre{ - Addr: addr, - State: &state, - Change: &change, - } - _, err = applyPre.Eval(ctx) - if err != nil { - return err + diags = diags.Append(n.preApplyHook(ctx, change)) + if diags.HasErrors() { + return diags } - apply := &EvalApply{ - Addr: addr, - Config: nil, // No configuration because we are destroying - State: &state, - Change: &change, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - Output: &state, - Error: &applyError, - } - _, err = apply.Eval(ctx) - if err != nil { - return err - } + // we pass a nil configuration to apply because we are destroying + state, applyDiags := n.apply(ctx, state, change, nil, false) + diags = diags.Append(applyDiags) + // don't return 
immediately on errors, we need to handle the state // Always write the resource back to the state deposed. If it // was successfully destroyed it will be pruned. If it was not, it will // be caught on the next run. - writeStateDeposed := &EvalWriteStateDeposed{ - Addr: addr, - Key: n.DeposedKey, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - } - _, err = writeStateDeposed.Eval(ctx) + err = n.writeResourceInstanceState(ctx, state) if err != nil { - return err + return diags.Append(err) } - applyPost := &EvalApplyPost{ - Addr: addr, - State: &state, - Error: &applyError, - } - _, err = applyPost.Eval(ctx) - if err != nil { - return err - } - if applyError != nil { - return applyError - } - UpdateStateHook(ctx) - return nil + diags = diags.Append(n.postApplyHook(ctx, state, diags.Err())) + + return diags.Append(updateStateHook(ctx)) } // GraphNodeDeposer is an optional interface implemented by graph nodes that @@ -295,3 +210,46 @@ type graphNodeDeposer struct { func (n *graphNodeDeposer) SetPreallocatedDeposedKey(key states.DeposedKey) { n.PreallocatedDeposedKey = key } + +func (n *NodeDestroyDeposedResourceInstanceObject) writeResourceInstanceState(ctx EvalContext, obj *states.ResourceInstanceObject) error { + absAddr := n.Addr + key := n.DeposedKey + state := ctx.State() + + if key == states.NotDeposed { + // should never happen + return fmt.Errorf("can't save deposed object for %s without a deposed key; this is a bug in Terraform that should be reported", absAddr) + } + + if obj == nil { + // No need to encode anything: we'll just write it directly. 
+ state.SetResourceInstanceDeposed(absAddr, key, nil, n.ResolvedProvider) + log.Printf("[TRACE] writeResourceInstanceStateDeposed: removing state object for %s deposed %s", absAddr, key) + return nil + } + + _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + if err != nil { + return err + } + if providerSchema == nil { + // Should never happen, unless our state object is nil + panic("writeResourceInstanceStateDeposed used with no ProviderSchema object") + } + + schema, currentVersion := providerSchema.SchemaForResourceAddr(absAddr.ContainingResource().Resource) + if schema == nil { + // It shouldn't be possible to get this far in any real scenario + // without a schema, but we might end up here in contrived tests that + // fail to set up their world properly. + return fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) + } + src, err := obj.Encode(schema.ImpliedType(), currentVersion) + if err != nil { + return fmt.Errorf("failed to encode %s in state: %s", absAddr, err) + } + + log.Printf("[TRACE] writeResourceInstanceStateDeposed: writing state object for %s deposed %s", absAddr, key) + state.SetResourceInstanceDeposed(absAddr, key, src, n.ResolvedProvider) + return nil +} diff --git a/terraform/node_resource_destroy_deposed_test.go b/terraform/node_resource_destroy_deposed_test.go index 584dab5ac..e4410bcf8 100644 --- a/terraform/node_resource_destroy_deposed_test.go +++ b/terraform/node_resource_destroy_deposed_test.go @@ -26,7 +26,7 @@ func TestNodePlanDeposedResourceInstanceObject_Execute(t *testing.T) { ) p := testProvider("test") - p.UpgradeResourceStateResponse = providers.UpgradeResourceStateResponse{ + p.UpgradeResourceStateResponse = &providers.UpgradeResourceStateResponse{ UpgradedState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("bar"), }), @@ -85,28 +85,32 @@ func TestNodeDestroyDeposedResourceInstanceObject_Execute(t *testing.T) { 
mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`), ) + schema := &ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + } + p := testProvider("test") - p.UpgradeResourceStateResponse = providers.UpgradeResourceStateResponse{ + p.GetSchemaResponse = getSchemaResponseFromProviderSchema(schema) + + p.UpgradeResourceStateResponse = &providers.UpgradeResourceStateResponse{ UpgradedState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("bar"), }), } ctx := &MockEvalContext{ - StateState: state.SyncWrapper(), - ProviderProvider: p, - ProviderSchemaSchema: &ProviderSchema{ - ResourceTypes: map[string]*configschema.Block{ - "test_instance": { - Attributes: map[string]*configschema.Attribute{ - "id": { - Type: cty.String, - Computed: true, - }, - }, - }, - }, - }, - ChangesChanges: plans.NewChanges().SyncWrapper(), + StateState: state.SyncWrapper(), + ProviderProvider: p, + ProviderSchemaSchema: schema, + ChangesChanges: plans.NewChanges().SyncWrapper(), } node := NodeDestroyDeposedResourceInstanceObject{ @@ -128,3 +132,47 @@ func TestNodeDestroyDeposedResourceInstanceObject_Execute(t *testing.T) { t.Fatalf("resources left in state after destroy") } } + +func TestNodeDestroyDeposedResourceInstanceObject_WriteResourceInstanceState(t *testing.T) { + state := states.NewState() + ctx := new(MockEvalContext) + ctx.StateState = state.SyncWrapper() + ctx.PathPath = addrs.RootModuleInstance + mockProvider := mockProviderWithResourceTypeSchema("aws_instance", &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Optional: true, + }, + }, + }) + ctx.ProviderProvider = mockProvider + ctx.ProviderSchemaSchema = mockProvider.ProviderSchema() + + obj := &states.ResourceInstanceObject{ + Value: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("i-abc123"), 
+ }), + Status: states.ObjectReady, + } + node := &NodeDestroyDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: &NodeAbstractResourceInstance{ + NodeAbstractResource: NodeAbstractResource{ + ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + }, + Addr: mustResourceInstanceAddr("aws_instance.foo"), + }, + DeposedKey: states.NewDeposedKey(), + } + err := node.writeResourceInstanceState(ctx, obj) + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + + checkStateString(t, state, ` +aws_instance.foo: (1 deposed) + ID = + provider = provider["registry.terraform.io/hashicorp/aws"] + Deposed ID 1 = i-abc123 + `) +} diff --git a/terraform/node_resource_plan.go b/terraform/node_resource_plan.go index 466c3a87d..15e97bc36 100644 --- a/terraform/node_resource_plan.go +++ b/terraform/node_resource_plan.go @@ -73,13 +73,11 @@ func (n *nodeExpandPlannableResource) DynamicExpand(ctx EvalContext) (*Graph, er var g Graph expander := ctx.InstanceExpander() - var resources []addrs.AbsResource moduleInstances := expander.ExpandModule(n.Addr.Module) // Add the current expanded resource to the graph for _, module := range moduleInstances { resAddr := n.Addr.Resource.Absolute(module) - resources = append(resources, resAddr) g.Add(&NodePlannableResource{ NodeAbstractResource: n.NodeAbstractResource, Addr: resAddr, @@ -172,21 +170,15 @@ func (n *NodePlannableResource) Name() string { return n.Addr.String() } -// GraphNodeModuleInstance -func (n *NodePlannableResource) ModuleInstance() addrs.ModuleInstance { - return n.Addr.Module -} - // GraphNodeExecutable -func (n *NodePlannableResource) Execute(ctx EvalContext, op walkOperation) error { +func (n *NodePlannableResource) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { if n.Config == nil { // Nothing to do, then. 
log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", n.Name()) return nil } - err := n.writeResourceState(ctx, n.Addr) - return err + return n.writeResourceState(ctx, n.Addr) } // GraphNodeDestroyerCBD diff --git a/terraform/node_resource_plan_destroy.go b/terraform/node_resource_plan_destroy.go index 0118815bd..3d073795d 100644 --- a/terraform/node_resource_plan_destroy.go +++ b/terraform/node_resource_plan_destroy.go @@ -4,6 +4,7 @@ import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // NodePlanDestroyableResourceInstance represents a resource that is ready @@ -32,7 +33,7 @@ func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceIn } // GraphNodeEvalable -func (n *NodePlanDestroyableResourceInstance) Execute(ctx EvalContext, op walkOperation) error { +func (n *NodePlanDestroyableResourceInstance) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { addr := n.ResourceInstanceAddr() // Declare a bunch of variables that are used for state during @@ -41,37 +42,23 @@ func (n *NodePlanDestroyableResourceInstance) Execute(ctx EvalContext, op walkOp var change *plans.ResourceInstanceChange var state *states.ResourceInstanceObject - _, providerSchema, err := GetProvider(ctx, n.ResolvedProvider) - if err != nil { - return err + state, err := n.readResourceInstanceState(ctx, addr) + diags = diags.Append(err) + if diags.HasErrors() { + return diags } - state, err = n.ReadResourceInstanceState(ctx, addr) - if err != nil { - return err + change, destroyPlanDiags := n.planDestroy(ctx, state, "") + diags = diags.Append(destroyPlanDiags) + if diags.HasErrors() { + return diags } - diffDestroy := &EvalDiffDestroy{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &state, - Output: &change, - } - _, err = diffDestroy.Eval(ctx) - if err != nil { - return err + diags = 
diags.Append(n.checkPreventDestroy(change)) + if diags.HasErrors() { + return diags } - err = n.checkPreventDestroy(change) - if err != nil { - return err - } - - writeDiff := &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - } - _, err = writeDiff.Eval(ctx) - return err + diags = diags.Append(n.writeChange(ctx, change, "")) + return diags } diff --git a/terraform/node_resource_plan_instance.go b/terraform/node_resource_plan_instance.go index 3810484d0..1f57e85c8 100644 --- a/terraform/node_resource_plan_instance.go +++ b/terraform/node_resource_plan_instance.go @@ -2,9 +2,11 @@ package terraform import ( "fmt" + "log" "github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" "github.com/hashicorp/terraform/addrs" ) @@ -30,7 +32,7 @@ var ( ) // GraphNodeEvalable -func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperation) error { +func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { addr := n.ResourceInstanceAddr() // Eval info is different depending on what kind of resource this is @@ -44,86 +46,52 @@ func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperatio } } -func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) error { +func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { config := n.Config addr := n.ResourceInstanceAddr() var change *plans.ResourceInstanceChange var state *states.ResourceInstanceObject - provider, providerSchema, err := GetProvider(ctx, n.ResolvedProvider) - if err != nil { - return err + _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + diags = diags.Append(err) + if diags.HasErrors() { + return diags } - state, err = n.ReadResourceInstanceState(ctx, addr) - if err != nil { - return err + state, err = n.readResourceInstanceState(ctx, addr) + diags 
= diags.Append(err) + if diags.HasErrors() { + return diags } - validateSelfRef := &EvalValidateSelfRef{ - Addr: addr.Resource, - Config: config.Config, - ProviderSchema: &providerSchema, - } - _, err = validateSelfRef.Eval(ctx) - if err != nil { - return err + diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema)) + if diags.HasErrors() { + return diags } - readDataPlan := &evalReadDataPlan{ - evalReadData: evalReadData{ - Addr: addr.Resource, - Config: n.Config, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - OutputChange: &change, - State: &state, - dependsOn: n.dependsOn, - }, - } - _, err = readDataPlan.Eval(ctx) - if err != nil { - return err + change, state, planDiags := n.planDataSource(ctx, state) + diags = diags.Append(planDiags) + if diags.HasErrors() { + return diags } // write the data source into both the refresh state and the // working state - writeRefreshState := &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - targetState: refreshState, + diags = diags.Append(n.writeResourceInstanceState(ctx, state, nil, refreshState)) + if diags.HasErrors() { + return diags } - _, err = writeRefreshState.Eval(ctx) - if err != nil { - return err + diags = diags.Append(n.writeResourceInstanceState(ctx, state, nil, workingState)) + if diags.HasErrors() { + return diags } - writeState := &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - } - _, err = writeState.Eval(ctx) - if err != nil { - return err - } - - writeDiff := &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - } - _, err = writeDiff.Eval(ctx) - return err + diags = diags.Append(n.writeChange(ctx, change, "")) + return diags } -func (n *NodePlannableResourceInstance) managedResourceExecute(ctx 
EvalContext) error { +func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { config := n.Config addr := n.ResourceInstanceAddr() @@ -131,105 +99,65 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) var instanceRefreshState *states.ResourceInstanceObject var instancePlanState *states.ResourceInstanceObject - provider, providerSchema, err := GetProvider(ctx, n.ResolvedProvider) - if err != nil { - return err + _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + diags = diags.Append(err) + if diags.HasErrors() { + return diags } - validateSelfRef := &EvalValidateSelfRef{ - Addr: addr.Resource, - Config: config.Config, - ProviderSchema: &providerSchema, - } - _, err = validateSelfRef.Eval(ctx) - if err != nil { - return err + diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema)) + if diags.HasErrors() { + return diags } - instanceRefreshState, err = n.ReadResourceInstanceState(ctx, addr) - if err != nil { - return err + instanceRefreshState, err = n.readResourceInstanceState(ctx, addr) + diags = diags.Append(err) + if diags.HasErrors() { + return diags } - refreshLifecycle := &EvalRefreshLifecycle{ - Addr: addr, - Config: n.Config, - State: &instanceRefreshState, - ForceCreateBeforeDestroy: n.ForceCreateBeforeDestroy, - } - _, err = refreshLifecycle.Eval(ctx) - if err != nil { - return err + + // In 0.13 we could be refreshing a resource with no config. 
+ // We should be operating on managed resource, but check here to be certain + if n.Config == nil || n.Config.Managed == nil { + log.Printf("[WARN] managedResourceExecute: no Managed config value found in instance state for %q", n.Addr) + } else { + if instanceRefreshState != nil { + instanceRefreshState.CreateBeforeDestroy = n.Config.Managed.CreateBeforeDestroy || n.ForceCreateBeforeDestroy + } } // Refresh, maybe if !n.skipRefresh { - refresh := &EvalRefresh{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - Provider: &provider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - State: &instanceRefreshState, - Output: &instanceRefreshState, - } - _, err = refresh.Eval(ctx) - if err != nil { - return err + s, refreshDiags := n.refresh(ctx, instanceRefreshState) + diags = diags.Append(refreshDiags) + if diags.HasErrors() { + return diags } + instanceRefreshState = s - writeRefreshState := &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &instanceRefreshState, - targetState: refreshState, - Dependencies: &n.Dependencies, - } - _, err = writeRefreshState.Eval(ctx) - if err != nil { - return err + diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, n.Dependencies, refreshState)) + if diags.HasErrors() { + return diags } } // Plan the instance - diff := &EvalDiff{ - Addr: addr.Resource, - Config: n.Config, - CreateBeforeDestroy: n.ForceCreateBeforeDestroy, - Provider: &provider, - ProviderAddr: n.ResolvedProvider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - State: &instanceRefreshState, - OutputChange: &change, - OutputState: &instancePlanState, - } - _, err = diff.Eval(ctx) - if err != nil { - return err + change, instancePlanState, planDiags := n.plan(ctx, change, instanceRefreshState, n.ForceCreateBeforeDestroy) + diags = diags.Append(planDiags) + if diags.HasErrors() { + return diags } - err = 
n.checkPreventDestroy(change) - if err != nil { - return err + diags = diags.Append(n.checkPreventDestroy(change)) + if diags.HasErrors() { + return diags } - writeState := &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - State: &instancePlanState, - ProviderSchema: &providerSchema, - } - _, err = writeState.Eval(ctx) - if err != nil { - return err + diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, n.Dependencies, workingState)) + if diags.HasErrors() { + return diags } - writeDiff := &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - } - _, err = writeDiff.Eval(ctx) - return err + diags = diags.Append(n.writeChange(ctx, change, "")) + return diags } diff --git a/terraform/node_resource_plan_orphan.go b/terraform/node_resource_plan_orphan.go index 515caec58..484c91b00 100644 --- a/terraform/node_resource_plan_orphan.go +++ b/terraform/node_resource_plan_orphan.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // NodePlannableResourceInstanceOrphan represents a resource that is "applyable": @@ -26,6 +27,7 @@ var ( _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstanceOrphan)(nil) _ GraphNodeAttachResourceState = (*NodePlannableResourceInstanceOrphan)(nil) _ GraphNodeExecutable = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeProviderConsumer = (*NodePlannableResourceInstanceOrphan)(nil) ) func (n *NodePlannableResourceInstanceOrphan) Name() string { @@ -33,7 +35,7 @@ func (n *NodePlannableResourceInstanceOrphan) Name() string { } // GraphNodeExecutable -func (n *NodePlannableResourceInstanceOrphan) Execute(ctx EvalContext, op walkOperation) error { +func (n *NodePlannableResourceInstanceOrphan) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { addr := n.ResourceInstanceAddr() // Eval info is 
different depending on what kind of resource this is @@ -47,30 +49,41 @@ func (n *NodePlannableResourceInstanceOrphan) Execute(ctx EvalContext, op walkOp } } -func (n *NodePlannableResourceInstanceOrphan) dataResourceExecute(ctx EvalContext) error { +func (n *NodePlannableResourceInstanceOrphan) ProvidedBy() (addr addrs.ProviderConfig, exact bool) { + if n.Addr.Resource.Resource.Mode == addrs.DataResourceMode { + // indicate that this node does not require a configured provider + return nil, true + } + return n.NodeAbstractResourceInstance.ProvidedBy() +} + +func (n *NodePlannableResourceInstanceOrphan) dataResourceExecute(ctx EvalContext) tfdiags.Diagnostics { // A data source that is no longer in the config is removed from the state log.Printf("[TRACE] NodePlannableResourceInstanceOrphan: removing state object for %s", n.Addr) - state := ctx.RefreshState() - state.SetResourceInstanceCurrent(n.Addr, nil, n.ResolvedProvider) + + // we need to update both the refresh state to refresh the current data + // source, and the working state for plan-time evaluations. + refreshState := ctx.RefreshState() + refreshState.SetResourceInstanceCurrent(n.Addr, nil, n.ResolvedProvider) + + workingState := ctx.State() + workingState.SetResourceInstanceCurrent(n.Addr, nil, n.ResolvedProvider) return nil } -func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalContext) error { +func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) { addr := n.ResourceInstanceAddr() // Declare a bunch of variables that are used for state during // evaluation. These are written to by-address below. 
var change *plans.ResourceInstanceChange var state *states.ResourceInstanceObject + var err error - provider, providerSchema, err := GetProvider(ctx, n.ResolvedProvider) - if err != nil { - return err - } - - state, err = n.ReadResourceInstanceState(ctx, addr) - if err != nil { - return err + state, err = n.readResourceInstanceState(ctx, addr) + diags = diags.Append(err) + if diags.HasErrors() { + return diags } if !n.skipRefresh { @@ -80,69 +93,34 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalCon // plan before apply, and may not handle a missing resource during // Delete correctly. If this is a simple refresh, Terraform is // expected to remove the missing resource from the state entirely - refresh := &EvalRefresh{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - Provider: &provider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - State: &state, - Output: &state, - } - _, err = refresh.Eval(ctx) - if err != nil { - return err + state, refreshDiags := n.refresh(ctx, state) + diags = diags.Append(refreshDiags) + if diags.HasErrors() { + return diags } - writeRefreshState := &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - targetState: refreshState, - } - _, err = writeRefreshState.Eval(ctx) - if err != nil { - return err + diags = diags.Append(n.writeResourceInstanceState(ctx, state, n.Dependencies, refreshState)) + if diags.HasErrors() { + return diags } } - diffDestroy := &EvalDiffDestroy{ - Addr: addr.Resource, - State: &state, - ProviderAddr: n.ResolvedProvider, - Output: &change, - OutputState: &state, // Will point to a nil state after this complete, signalling destroyed - } - _, err = diffDestroy.Eval(ctx) - if err != nil { - return err + change, destroyPlanDiags := n.planDestroy(ctx, state, "") + diags = diags.Append(destroyPlanDiags) + if diags.HasErrors() { + return diags } - err = 
n.checkPreventDestroy(change) - if err != nil { - return err + diags = diags.Append(n.checkPreventDestroy(change)) + if diags.HasErrors() { + return diags } - writeDiff := &EvalWriteDiff{ - Addr: addr.Resource, - ProviderSchema: &providerSchema, - Change: &change, - } - _, err = writeDiff.Eval(ctx) - if err != nil { - return err + diags = diags.Append(n.writeChange(ctx, change, "")) + if diags.HasErrors() { + return diags } - writeState := &EvalWriteState{ - Addr: addr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - } - _, err = writeState.Eval(ctx) - if err != nil { - return err - } - return nil + diags = diags.Append(n.writeResourceInstanceState(ctx, nil, n.Dependencies, workingState)) + return diags } diff --git a/terraform/node_resource_plan_orphan_test.go b/terraform/node_resource_plan_orphan_test.go index f4396448e..57f6804d6 100644 --- a/terraform/node_resource_plan_orphan_test.go +++ b/terraform/node_resource_plan_orphan_test.go @@ -55,9 +55,9 @@ func TestNodeResourcePlanOrphanExecute(t *testing.T) { Addr: mustResourceInstanceAddr("test_object.foo"), }, } - err := node.Execute(ctx, walkApply) - if err != nil { - t.Fatalf("unexpected error: %s", err.Error()) + diags := node.Execute(ctx, walkApply) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) } if !state.Empty() { t.Fatalf("expected empty state, got %s", state.String()) diff --git a/terraform/node_resource_plan_test.go b/terraform/node_resource_plan_test.go index c5ae144ba..e3a8faa19 100644 --- a/terraform/node_resource_plan_test.go +++ b/terraform/node_resource_plan_test.go @@ -23,9 +23,9 @@ func TestNodePlannableResourceExecute(t *testing.T) { }, Addr: mustAbsResourceAddr("test_instance.foo"), } - err := node.Execute(ctx, walkApply) - if err != nil { - t.Fatalf("unexpected error: %s", err.Error()) + diags := node.Execute(ctx, walkApply) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) } if !state.Empty() 
{ t.Fatalf("expected no state, got:\n %s", state.String()) @@ -48,9 +48,9 @@ func TestNodePlannableResourceExecute(t *testing.T) { }, Addr: mustAbsResourceAddr("test_instance.foo"), } - err := node.Execute(ctx, walkApply) - if err != nil { - t.Fatalf("unexpected error: %s", err.Error()) + diags := node.Execute(ctx, walkApply) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) } if state.Empty() { t.Fatal("expected resources in state, got empty state") diff --git a/terraform/node_resource_validate.go b/terraform/node_resource_validate.go index 436d30a45..933cb43d6 100644 --- a/terraform/node_resource_validate.go +++ b/terraform/node_resource_validate.go @@ -3,8 +3,13 @@ package terraform import ( "fmt" + "github.com/hashicorp/hcl/v2" "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) @@ -31,31 +36,8 @@ func (n *NodeValidatableResource) Path() addrs.ModuleInstance { } // GraphNodeEvalable -func (n *NodeValidatableResource) Execute(ctx EvalContext, op walkOperation) error { - addr := n.ResourceAddr() - config := n.Config - - // Declare the variables will be used are used to pass values along - // the evaluation sequence below. These are written to via pointers - // passed to the EvalNodes. 
- var configVal cty.Value - provider, providerSchema, err := GetProvider(ctx, n.ResolvedProvider) - if err != nil { - return err - } - - evalValidateResource := &EvalValidateResource{ - Addr: addr.Resource, - Provider: &provider, - ProviderMetas: n.ProviderMetas, - ProviderSchema: &providerSchema, - Config: config, - ConfigVal: &configVal, - } - err = evalValidateResource.Validate(ctx) - if err != nil { - return err - } +func (n *NodeValidatableResource) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { + diags = diags.Append(n.validateResource(ctx)) if managed := n.Config.Managed; managed != nil { hasCount := n.Config.Count != nil @@ -64,34 +46,426 @@ func (n *NodeValidatableResource) Execute(ctx EvalContext, op walkOperation) err // Validate all the provisioners for _, p := range managed.Provisioners { if p.Connection == nil { - p.Connection = config.Managed.Connection - } else if config.Managed.Connection != nil { - p.Connection.Config = configs.MergeBodies(config.Managed.Connection.Config, p.Connection.Config) - } - - provisioner := ctx.Provisioner(p.Type) - if provisioner == nil { - return fmt.Errorf("provisioner %s not initialized", p.Type) - } - provisionerSchema := ctx.ProvisionerSchema(p.Type) - if provisionerSchema == nil { - return fmt.Errorf("provisioner %s not initialized", p.Type) + p.Connection = n.Config.Managed.Connection + } else if n.Config.Managed.Connection != nil { + p.Connection.Config = configs.MergeBodies(n.Config.Managed.Connection.Config, p.Connection.Config) } // Validate Provisioner Config - validateProvisioner := &EvalValidateProvisioner{ - ResourceAddr: addr.Resource, - Provisioner: &provisioner, - Schema: &provisionerSchema, - Config: p, - ResourceHasCount: hasCount, - ResourceHasForEach: hasForEach, - } - err := validateProvisioner.Validate(ctx) - if err != nil { - return err + diags = diags.Append(n.validateProvisioner(ctx, p, hasCount, hasForEach)) + if diags.HasErrors() { + return diags } } } - return nil + 
return diags +} + +// validateProvisioner validates the configuration of a provisioner belonging to +// a resource. The provisioner config is expected to contain the merged +// connection configurations. +func (n *NodeValidatableResource) validateProvisioner(ctx EvalContext, p *configs.Provisioner, hasCount, hasForEach bool) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + provisioner, err := ctx.Provisioner(p.Type) + if err != nil { + diags = diags.Append(err) + return diags + } + + if provisioner == nil { + return diags.Append(fmt.Errorf("provisioner %s not initialized", p.Type)) + } + provisionerSchema := ctx.ProvisionerSchema(p.Type) + if provisionerSchema == nil { + return diags.Append(fmt.Errorf("provisioner %s not initialized", p.Type)) + } + + // Validate the provisioner's own config first + configVal, _, configDiags := n.evaluateBlock(ctx, p.Config, provisionerSchema, hasCount, hasForEach) + diags = diags.Append(configDiags) + + if configVal == cty.NilVal { + // Should never happen for a well-behaved EvaluateBlock implementation + return diags.Append(fmt.Errorf("EvaluateBlock returned nil value")) + } + + req := provisioners.ValidateProvisionerConfigRequest{ + Config: configVal, + } + + resp := provisioner.ValidateProvisionerConfig(req) + diags = diags.Append(resp.Diagnostics) + + if p.Connection != nil { + // We can't comprehensively validate the connection config since its + // final structure is decided by the communicator and we can't instantiate + // that until we have a complete instance state. However, we *can* catch + // configuration keys that are not valid for *any* communicator, catching + // typos early rather than waiting until we actually try to run one of + // the resource's provisioners. 
+ _, _, connDiags := n.evaluateBlock(ctx, p.Connection.Config, connectionBlockSupersetSchema, hasCount, hasForEach) + diags = diags.Append(connDiags) + } + return diags +} + +func (n *NodeValidatableResource) evaluateBlock(ctx EvalContext, body hcl.Body, schema *configschema.Block, hasCount, hasForEach bool) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + keyData := EvalDataForNoInstanceKey + selfAddr := n.ResourceAddr().Resource.Instance(addrs.NoKey) + + if hasCount { + // For a resource that has count, we allow count.index but don't + // know at this stage what it will return. + keyData = InstanceKeyEvalData{ + CountIndex: cty.UnknownVal(cty.Number), + } + + // "self" can't point to an unknown key, but we'll force it to be + // key 0 here, which should return an unknown value of the + // expected type since none of these elements are known at this + // point anyway. + selfAddr = n.ResourceAddr().Resource.Instance(addrs.IntKey(0)) + } else if hasForEach { + // For a resource that has for_each, we allow each.value and each.key + // but don't know at this stage what it will return. + keyData = InstanceKeyEvalData{ + EachKey: cty.UnknownVal(cty.String), + EachValue: cty.DynamicVal, + } + + // "self" can't point to an unknown key, but we'll force it to be + // key "" here, which should return an unknown value of the + // expected type since none of these elements are known at + // this point anyway. + selfAddr = n.ResourceAddr().Resource.Instance(addrs.StringKey("")) + } + + return ctx.EvaluateBlock(body, schema, selfAddr, keyData) +} + +// connectionBlockSupersetSchema is a schema representing the superset of all +// possible arguments for "connection" blocks across all supported connection +// types. +// +// This currently lives here because we've not yet updated our communicator +// subsystem to be aware of schema itself. 
Once that is done, we can remove +// this and use a type-specific schema from the communicator to validate +// exactly what is expected for a given connection type. +var connectionBlockSupersetSchema = &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + // NOTE: "type" is not included here because it's treated special + // by the config loader and stored away in a separate field. + + // Common attributes for both connection types + "host": { + Type: cty.String, + Required: true, + }, + "type": { + Type: cty.String, + Optional: true, + }, + "user": { + Type: cty.String, + Optional: true, + }, + "password": { + Type: cty.String, + Optional: true, + }, + "port": { + Type: cty.String, + Optional: true, + }, + "timeout": { + Type: cty.String, + Optional: true, + }, + "script_path": { + Type: cty.String, + Optional: true, + }, + // For type=ssh only (enforced in ssh communicator) + "target_platform": { + Type: cty.String, + Optional: true, + }, + "private_key": { + Type: cty.String, + Optional: true, + }, + "certificate": { + Type: cty.String, + Optional: true, + }, + "host_key": { + Type: cty.String, + Optional: true, + }, + "agent": { + Type: cty.Bool, + Optional: true, + }, + "agent_identity": { + Type: cty.String, + Optional: true, + }, + "bastion_host": { + Type: cty.String, + Optional: true, + }, + "bastion_host_key": { + Type: cty.String, + Optional: true, + }, + "bastion_port": { + Type: cty.Number, + Optional: true, + }, + "bastion_user": { + Type: cty.String, + Optional: true, + }, + "bastion_password": { + Type: cty.String, + Optional: true, + }, + "bastion_private_key": { + Type: cty.String, + Optional: true, + }, + "bastion_certificate": { + Type: cty.String, + Optional: true, + }, + + // For type=winrm only (enforced in winrm communicator) + "https": { + Type: cty.Bool, + Optional: true, + }, + "insecure": { + Type: cty.Bool, + Optional: true, + }, + "cacert": { + Type: cty.String, + Optional: true, + }, + "use_ntlm": { + Type: 
cty.Bool, + Optional: true, + }, + }, +} + +func (n *NodeValidatableResource) validateResource(ctx EvalContext) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) + diags = diags.Append(err) + if diags.HasErrors() { + return diags + } + if providerSchema == nil { + diags = diags.Append(fmt.Errorf("validateResource has nil schema for %s", n.Addr)) + return diags + } + + keyData := EvalDataForNoInstanceKey + + switch { + case n.Config.Count != nil: + // If the config block has count, we'll evaluate with an unknown + // number as count.index so we can still type check even though + // we won't expand count until the plan phase. + keyData = InstanceKeyEvalData{ + CountIndex: cty.UnknownVal(cty.Number), + } + + // Basic type-checking of the count argument. More complete validation + // of this will happen when we DynamicExpand during the plan walk. + countDiags := validateCount(ctx, n.Config.Count) + diags = diags.Append(countDiags) + + case n.Config.ForEach != nil: + keyData = InstanceKeyEvalData{ + EachKey: cty.UnknownVal(cty.String), + EachValue: cty.UnknownVal(cty.DynamicPseudoType), + } + + // Evaluate the for_each expression here so we can expose the diagnostics + forEachDiags := validateForEach(ctx, n.Config.ForEach) + diags = diags.Append(forEachDiags) + } + + diags = diags.Append(validateDependsOn(ctx, n.Config.DependsOn)) + + // Validate the provider_meta block for the provider this resource + // belongs to, if there is one. + // + // Note: this will return an error for every resource a provider + // uses in a module, if the provider_meta for that module is + // incorrect. The only way to solve this that we've found is to + // insert a new ProviderMeta graph node in the graph, and make all + // that provider's resources in the module depend on the node. 
That's + // an awful heavy hammer to swing for this feature, which should be + // used only in limited cases with heavy coordination with the + // Terraform team, so we're going to defer that solution for a future + // enhancement to this functionality. + /* + if n.ProviderMetas != nil { + if m, ok := n.ProviderMetas[n.ProviderAddr.ProviderConfig.Type]; ok && m != nil { + // if the provider doesn't support this feature, throw an error + if (*n.ProviderSchema).ProviderMeta == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Provider %s doesn't support provider_meta", cfg.ProviderConfigAddr()), + Detail: fmt.Sprintf("The resource %s belongs to a provider that doesn't support provider_meta blocks", n.Addr), + Subject: &m.ProviderRange, + }) + } else { + _, _, metaDiags := ctx.EvaluateBlock(m.Config, (*n.ProviderSchema).ProviderMeta, nil, EvalDataForNoInstanceKey) + diags = diags.Append(metaDiags) + } + } + } + */ + // BUG(paddy): we're not validating provider_meta blocks on EvalValidate right now + // because the ProviderAddr for the resource isn't available on the EvalValidate + // struct. + + // Provider entry point varies depending on resource mode, because + // managed resources and data resources are two distinct concepts + // in the provider abstraction. 
+ switch n.Config.Mode { + case addrs.ManagedResourceMode: + schema, _ := providerSchema.SchemaForResourceType(n.Config.Mode, n.Config.Type) + if schema == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource type", + Detail: fmt.Sprintf("The provider %s does not support resource type %q.", n.Config.ProviderConfigAddr(), n.Config.Type), + Subject: &n.Config.TypeRange, + }) + return diags + } + + configVal, _, valDiags := ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + return diags + } + + if n.Config.Managed != nil { // can be nil only in tests with poorly-configured mocks + for _, traversal := range n.Config.Managed.IgnoreChanges { + // validate the ignore_changes traversals apply. + moreDiags := schema.StaticValidateTraversal(traversal) + diags = diags.Append(moreDiags) + + // TODO: we want to notify users that they can't use + // ignore_changes for computed attributes, but we don't have an + // easy way to correlate the config value, schema and + // traversal together. 
+ } + } + + // Use unmarked value for validate request + unmarkedConfigVal, _ := configVal.UnmarkDeep() + req := providers.ValidateResourceTypeConfigRequest{ + TypeName: n.Config.Type, + Config: unmarkedConfigVal, + } + + resp := provider.ValidateResourceTypeConfig(req) + diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config)) + + case addrs.DataResourceMode: + schema, _ := providerSchema.SchemaForResourceType(n.Config.Mode, n.Config.Type) + if schema == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data source", + Detail: fmt.Sprintf("The provider %s does not support data source %q.", n.Config.ProviderConfigAddr(), n.Config.Type), + Subject: &n.Config.TypeRange, + }) + return diags + } + + configVal, _, valDiags := ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + return diags + } + + // Use unmarked value for validate request + unmarkedConfigVal, _ := configVal.UnmarkDeep() + req := providers.ValidateDataSourceConfigRequest{ + TypeName: n.Config.Type, + Config: unmarkedConfigVal, + } + + resp := provider.ValidateDataSourceConfig(req) + diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config)) + } + + return diags +} + +func validateCount(ctx EvalContext, expr hcl.Expression) (diags tfdiags.Diagnostics) { + val, countDiags := evaluateCountExpressionValue(expr, ctx) + // If the value isn't known then that's the best we can do for now, but + // we'll check more thoroughly during the plan walk + if !val.IsKnown() { + return diags + } + + if countDiags.HasErrors() { + diags = diags.Append(countDiags) + } + + return diags +} + +func validateForEach(ctx EvalContext, expr hcl.Expression) (diags tfdiags.Diagnostics) { + val, forEachDiags := evaluateForEachExpressionValue(expr, ctx, true) + // If the value isn't known then that's the best we can do for now, but + // we'll check more thoroughly during the plan walk + if !val.IsKnown() 
{ + return diags + } + + if forEachDiags.HasErrors() { + diags = diags.Append(forEachDiags) + } + + return diags +} + +func validateDependsOn(ctx EvalContext, dependsOn []hcl.Traversal) (diags tfdiags.Diagnostics) { + for _, traversal := range dependsOn { + ref, refDiags := addrs.ParseRef(traversal) + diags = diags.Append(refDiags) + if !refDiags.HasErrors() && len(ref.Remaining) != 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid depends_on reference", + Detail: "References in depends_on must be to a whole object (resource, etc), not to an attribute of an object.", + Subject: ref.Remaining.SourceRange().Ptr(), + }) + } + + // The ref must also refer to something that exists. To test that, + // we'll just eval it and count on the fact that our evaluator will + // detect references to non-existent objects. + if !diags.HasErrors() { + scope := ctx.EvaluationScope(nil, EvalDataForNoInstanceKey) + if scope != nil { // sometimes nil in tests, due to incomplete mocks + _, refDiags = scope.EvalReference(ref, cty.DynamicPseudoType) + diags = diags.Append(refDiags) + } + } + } + return diags } diff --git a/terraform/eval_validate_test.go b/terraform/node_resource_validate_test.go similarity index 55% rename from terraform/eval_validate_test.go rename to terraform/node_resource_validate_test.go index ce23b3596..9cc8726ff 100644 --- a/terraform/eval_validate_test.go +++ b/terraform/node_resource_validate_test.go @@ -7,17 +7,153 @@ import ( "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hcltest" - "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/providers" "github.com/hashicorp/terraform/provisioners" "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" ) -func TestEvalValidateResource_managedResource(t *testing.T) { +func 
TestNodeValidatableResource_ValidateProvisioner_valid(t *testing.T) { + ctx := &MockEvalContext{} + ctx.installSimpleEval() + mp := &MockProvisioner{} + ps := &configschema.Block{} + ctx.ProvisionerSchemaSchema = ps + ctx.ProvisionerProvisioner = mp + + pc := &configs.Provisioner{ + Type: "baz", + Config: hcl.EmptyBody(), + Connection: &configs.Connection{ + Config: configs.SynthBody("", map[string]cty.Value{ + "host": cty.StringVal("localhost"), + "type": cty.StringVal("ssh"), + }), + }, + } + + rc := &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_foo", + Name: "bar", + Config: configs.SynthBody("", map[string]cty.Value{}), + } + + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + }, + } + + diags := node.validateProvisioner(ctx, pc, false, false) + if diags.HasErrors() { + t.Fatalf("node.Eval failed: %s", diags.Err()) + } + if !mp.ValidateProvisionerConfigCalled { + t.Fatalf("p.ValidateProvisionerConfig not called") + } +} + +func TestNodeValidatableResource_ValidateProvisioner__warning(t *testing.T) { + ctx := &MockEvalContext{} + ctx.installSimpleEval() + mp := &MockProvisioner{} + ps := &configschema.Block{} + ctx.ProvisionerSchemaSchema = ps + ctx.ProvisionerProvisioner = mp + + pc := &configs.Provisioner{ + Type: "baz", + Config: hcl.EmptyBody(), + } + + rc := &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_foo", + Name: "bar", + Config: configs.SynthBody("", map[string]cty.Value{}), + Managed: &configs.ManagedResource{}, + } + + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + }, + } + + { + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.SimpleWarning("foo is deprecated")) + mp.ValidateProvisionerConfigResponse = provisioners.ValidateProvisionerConfigResponse{ + Diagnostics: diags, + } + } + + diags := 
node.validateProvisioner(ctx, pc, false, false) + if len(diags) != 1 { + t.Fatalf("wrong number of diagnostics in %s; want one warning", diags.ErrWithWarnings()) + } + + if got, want := diags[0].Description().Summary, mp.ValidateProvisionerConfigResponse.Diagnostics[0].Description().Summary; got != want { + t.Fatalf("wrong warning %q; want %q", got, want) + } +} + +func TestNodeValidatableResource_ValidateProvisioner__conntectionInvalid(t *testing.T) { + ctx := &MockEvalContext{} + ctx.installSimpleEval() + mp := &MockProvisioner{} + ps := &configschema.Block{} + ctx.ProvisionerSchemaSchema = ps + ctx.ProvisionerProvisioner = mp + + pc := &configs.Provisioner{ + Type: "baz", + Config: hcl.EmptyBody(), + Connection: &configs.Connection{ + Config: configs.SynthBody("", map[string]cty.Value{ + "type": cty.StringVal("ssh"), + "bananananananana": cty.StringVal("foo"), + "bazaz": cty.StringVal("bar"), + }), + }, + } + + rc := &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_foo", + Name: "bar", + Config: configs.SynthBody("", map[string]cty.Value{}), + Managed: &configs.ManagedResource{}, + } + + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + }, + } + + diags := node.validateProvisioner(ctx, pc, false, false) + if !diags.HasErrors() { + t.Fatalf("node.Eval succeeded; want error") + } + if len(diags) != 3 { + t.Fatalf("wrong number of diagnostics; want two errors\n\n%s", diags.Err()) + } + + errStr := diags.Err().Error() + if !(strings.Contains(errStr, "bananananananana") && strings.Contains(errStr, "bazaz")) { + t.Fatalf("wrong errors %q; want something about each of our invalid connInfo keys", errStr) + } +} + +func TestNodeValidatableResource_ValidateResource_managedResource(t *testing.T) { mp := simpleMockProvider() mp.ValidateResourceTypeConfigFn = func(req providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { if 
got, want := req.TypeName, "test_object"; got != want { @@ -26,6 +162,9 @@ func TestEvalValidateResource_managedResource(t *testing.T) { if got, want := req.Config.GetAttr("test_string"), cty.StringVal("bar"); !got.RawEquals(want) { t.Fatalf("wrong value for test_string\ngot: %#v\nwant: %#v", got, want) } + if got, want := req.Config.GetAttr("test_number"), cty.NumberIntVal(2); !got.RawEquals(want) { + t.Fatalf("wrong value for test_number\ngot: %#v\nwant: %#v", got, want) + } return providers.ValidateResourceTypeConfigResponse{} } @@ -36,23 +175,23 @@ func TestEvalValidateResource_managedResource(t *testing.T) { Name: "foo", Config: configs.SynthBody("", map[string]cty.Value{ "test_string": cty.StringVal("bar"), + "test_number": cty.NumberIntVal(2).Mark("sensitive"), }), } - node := &EvalValidateResource{ - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), }, - Provider: &p, - Config: rc, - ProviderSchema: &mp.GetSchemaReturn, } ctx := &MockEvalContext{} ctx.installSimpleEval() + ctx.ProviderSchemaSchema = mp.ProviderSchema() + ctx.ProviderProvider = p - err := node.Validate(ctx) + err := node.validateResource(ctx) if err != nil { t.Fatalf("err: %s", err) } @@ -62,7 +201,8 @@ func TestEvalValidateResource_managedResource(t *testing.T) { } } -func TestEvalValidateResource_managedResourceCount(t *testing.T) { +func TestNodeValidatableResource_ValidateResource_managedResourceCount(t *testing.T) { + // Setup mp := simpleMockProvider() mp.ValidateResourceTypeConfigFn = func(req providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { if got, want := req.TypeName, "test_object"; got != want { @@ -75,40 +215,58 @@ func 
TestEvalValidateResource_managedResourceCount(t *testing.T) { } p := providers.Interface(mp) - rc := &configs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "foo", - Count: hcltest.MockExprLiteral(cty.NumberIntVal(2)), - Config: configs.SynthBody("", map[string]cty.Value{ - "test_string": cty.StringVal("bar"), - }), - } - node := &EvalValidateResource{ - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }, - Provider: &p, - Config: rc, - ProviderSchema: &mp.GetSchemaReturn, - } ctx := &MockEvalContext{} ctx.installSimpleEval() + ctx.ProviderSchemaSchema = mp.ProviderSchema() + ctx.ProviderProvider = p - err := node.Validate(ctx) - if err != nil { - t.Fatalf("err: %s", err) + tests := []struct { + name string + count hcl.Expression + }{ + { + "simple count", + hcltest.MockExprLiteral(cty.NumberIntVal(2)), + }, + { + "marked count value", + hcltest.MockExprLiteral(cty.NumberIntVal(3).Mark("marked")), + }, } - if !mp.ValidateResourceTypeConfigCalled { - t.Fatal("Expected ValidateResourceTypeConfig to be called, but it was not!") + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + rc := &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_object", + Name: "foo", + Count: test.count, + Config: configs.SynthBody("", map[string]cty.Value{ + "test_string": cty.StringVal("bar"), + }), + } + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + }, + } + + diags := node.validateResource(ctx) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) + } + + if !mp.ValidateResourceTypeConfigCalled { + t.Fatal("Expected ValidateResourceTypeConfig to be called, but it was not!") + } + }) } } -func TestEvalValidateResource_dataSource(t *testing.T) { +func 
TestNodeValidatableResource_ValidateResource_dataSource(t *testing.T) { mp := simpleMockProvider() mp.ValidateDataSourceConfigFn = func(req providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse { if got, want := req.TypeName, "test_object"; got != want { @@ -117,6 +275,9 @@ func TestEvalValidateResource_dataSource(t *testing.T) { if got, want := req.Config.GetAttr("test_string"), cty.StringVal("bar"); !got.RawEquals(want) { t.Fatalf("wrong value for test_string\ngot: %#v\nwant: %#v", got, want) } + if got, want := req.Config.GetAttr("test_number"), cty.NumberIntVal(2); !got.RawEquals(want) { + t.Fatalf("wrong value for test_number\ngot: %#v\nwant: %#v", got, want) + } return providers.ValidateDataSourceConfigResponse{} } @@ -127,26 +288,26 @@ func TestEvalValidateResource_dataSource(t *testing.T) { Name: "foo", Config: configs.SynthBody("", map[string]cty.Value{ "test_string": cty.StringVal("bar"), + "test_number": cty.NumberIntVal(2).Mark("sensitive"), }), } - node := &EvalValidateResource{ - Addr: addrs.Resource{ - Mode: addrs.DataResourceMode, - Type: "aws_ami", - Name: "foo", + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), }, - Provider: &p, - Config: rc, - ProviderSchema: &mp.GetSchemaReturn, } ctx := &MockEvalContext{} ctx.installSimpleEval() + ctx.ProviderSchemaSchema = mp.ProviderSchema() + ctx.ProviderProvider = p - err := node.Validate(ctx) - if err != nil { - t.Fatalf("err: %s", err) + diags := node.validateResource(ctx) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) } if !mp.ValidateDataSourceConfigCalled { @@ -154,7 +315,7 @@ func TestEvalValidateResource_dataSource(t *testing.T) { } } -func TestEvalValidateResource_validReturnsNilError(t *testing.T) { +func TestNodeValidatableResource_ValidateResource_valid(t *testing.T) { 
mp := simpleMockProvider() mp.ValidateResourceTypeConfigFn = func(req providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { return providers.ValidateResourceTypeConfigResponse{} @@ -167,27 +328,26 @@ func TestEvalValidateResource_validReturnsNilError(t *testing.T) { Name: "foo", Config: configs.SynthBody("", map[string]cty.Value{}), } - node := &EvalValidateResource{ - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "foo", + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_object.foo"), + Config: rc, + ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), }, - Provider: &p, - Config: rc, - ProviderSchema: &mp.GetSchemaReturn, } ctx := &MockEvalContext{} ctx.installSimpleEval() + ctx.ProviderSchemaSchema = mp.ProviderSchema() + ctx.ProviderProvider = p - err := node.Validate(ctx) - if err != nil { - t.Fatalf("Expected nil error, got: %s", err) + diags := node.validateResource(ctx) + if diags.HasErrors() { + t.Fatalf("err: %s", diags.Err()) } } -func TestEvalValidateResource_warningsAndErrorsPassedThrough(t *testing.T) { +func TestNodeValidatableResource_ValidateResource_warningsAndErrorsPassedThrough(t *testing.T) { mp := simpleMockProvider() mp.ValidateResourceTypeConfigFn = func(req providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { var diags tfdiags.Diagnostics @@ -205,27 +365,24 @@ func TestEvalValidateResource_warningsAndErrorsPassedThrough(t *testing.T) { Name: "foo", Config: configs.SynthBody("", map[string]cty.Value{}), } - node := &EvalValidateResource{ - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "foo", + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + ResolvedProvider: 
mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), }, - Provider: &p, - Config: rc, - ProviderSchema: &mp.GetSchemaReturn, } ctx := &MockEvalContext{} ctx.installSimpleEval() + ctx.ProviderSchemaSchema = mp.ProviderSchema() + ctx.ProviderProvider = p - err := node.Validate(ctx) - if err == nil { + diags := node.validateResource(ctx) + if !diags.HasErrors() { t.Fatal("unexpected success; want error") } - var diags tfdiags.Diagnostics - diags = diags.Append(err) bySeverity := map[tfdiags.Severity]tfdiags.Diagnostics{} for _, diag := range diags { bySeverity[diag.Severity()] = append(bySeverity[diag.Severity()], diag) @@ -238,46 +395,7 @@ func TestEvalValidateResource_warningsAndErrorsPassedThrough(t *testing.T) { } } -func TestEvalValidateResource_ignoreWarnings(t *testing.T) { - mp := simpleMockProvider() - mp.ValidateResourceTypeConfigFn = func(req providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { - var diags tfdiags.Diagnostics - diags = diags.Append(tfdiags.SimpleWarning("warn")) - return providers.ValidateResourceTypeConfigResponse{ - Diagnostics: diags, - } - } - - p := providers.Interface(mp) - rc := &configs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test_object", - Name: "foo", - Config: configs.SynthBody("", map[string]cty.Value{}), - } - node := &EvalValidateResource{ - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "test-object", - Name: "foo", - }, - Provider: &p, - Config: rc, - ProviderSchema: &mp.GetSchemaReturn, - - IgnoreWarnings: true, - } - - ctx := &MockEvalContext{} - ctx.installSimpleEval() - - err := node.Validate(ctx) - if err != nil { - t.Fatalf("Expected no error, got: %s", err) - } -} - -func TestEvalValidateResource_invalidDependsOn(t *testing.T) { +func TestNodeValidatableResource_ValidateResource_invalidDependsOn(t *testing.T) { mp := simpleMockProvider() mp.ValidateResourceTypeConfigFn = func(req providers.ValidateResourceTypeConfigRequest) 
providers.ValidateResourceTypeConfigResponse { return providers.ValidateResourceTypeConfigResponse{} @@ -309,23 +427,23 @@ func TestEvalValidateResource_invalidDependsOn(t *testing.T) { }, }, } - node := &EvalValidateResource{ - Addr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", + node := NodeValidatableResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: mustConfigResourceAddr("test_foo.bar"), + Config: rc, + ResolvedProvider: mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), }, - Provider: &p, - Config: rc, - ProviderSchema: &mp.GetSchemaReturn, } ctx := &MockEvalContext{} ctx.installSimpleEval() - err := node.Validate(ctx) - if err != nil { - t.Fatalf("error for supposedly-valid config: %s", err) + ctx.ProviderSchemaSchema = mp.ProviderSchema() + ctx.ProviderProvider = p + + diags := node.validateResource(ctx) + if diags.HasErrors() { + t.Fatalf("error for supposedly-valid config: %s", diags.ErrWithWarnings()) } // Now we'll make it invalid by adding additional traversal steps at @@ -344,11 +462,11 @@ func TestEvalValidateResource_invalidDependsOn(t *testing.T) { }, }) - err = node.Validate(ctx) - if err == nil { + diags = node.validateResource(ctx) + if !diags.HasErrors() { t.Fatal("no error for invalid depends_on") } - if got, want := err.Error(), "Invalid depends_on reference"; !strings.Contains(got, want) { + if got, want := diags.Err().Error(), "Invalid depends_on reference"; !strings.Contains(got, want) { t.Fatalf("wrong error\ngot: %s\nwant: Message containing %q", got, want) } @@ -360,159 +478,11 @@ func TestEvalValidateResource_invalidDependsOn(t *testing.T) { }, }) - err = node.Validate(ctx) - if err == nil { + diags = node.validateResource(ctx) + if !diags.HasErrors() { t.Fatal("no error for invalid depends_on") } - if got, want := err.Error(), "Invalid depends_on reference"; !strings.Contains(got, want) { + if got, want := diags.Err().Error(), "Invalid depends_on 
reference"; !strings.Contains(got, want) { t.Fatalf("wrong error\ngot: %s\nwant: Message containing %q", got, want) } } - -func TestEvalValidateProvisioner_valid(t *testing.T) { - mp := &MockProvisioner{} - var p provisioners.Interface = mp - ctx := &MockEvalContext{} - ctx.installSimpleEval() - - schema := &configschema.Block{} - - node := &EvalValidateProvisioner{ - ResourceAddr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "foo", - Name: "bar", - }, - Provisioner: &p, - Schema: &schema, - Config: &configs.Provisioner{ - Type: "baz", - Config: hcl.EmptyBody(), - Connection: &configs.Connection{ - Config: configs.SynthBody("", map[string]cty.Value{ - "host": cty.StringVal("localhost"), - "type": cty.StringVal("ssh"), - }), - }, - }, - } - - err := node.Validate(ctx) - if err != nil { - t.Fatalf("node.Eval failed: %s", err) - } - if !mp.ValidateProvisionerConfigCalled { - t.Fatalf("p.ValidateProvisionerConfig not called") - } -} - -func TestEvalValidateProvisioner_warning(t *testing.T) { - mp := &MockProvisioner{} - var p provisioners.Interface = mp - ctx := &MockEvalContext{} - ctx.installSimpleEval() - - schema := &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "type": { - Type: cty.String, - Optional: true, - }, - }, - } - - node := &EvalValidateProvisioner{ - ResourceAddr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "foo", - Name: "bar", - }, - Provisioner: &p, - Schema: &schema, - Config: &configs.Provisioner{ - Type: "baz", - Config: hcl.EmptyBody(), - Connection: &configs.Connection{ - Config: configs.SynthBody("", map[string]cty.Value{ - "host": cty.StringVal("localhost"), - "type": cty.StringVal("ssh"), - }), - }, - }, - } - - { - var diags tfdiags.Diagnostics - diags = diags.Append(tfdiags.SimpleWarning("foo is deprecated")) - mp.ValidateProvisionerConfigResponse = provisioners.ValidateProvisionerConfigResponse{ - Diagnostics: diags, - } - } - - err := node.Validate(ctx) - if err == nil { - 
t.Fatalf("node.Eval succeeded; want error") - } - - var diags tfdiags.Diagnostics - diags = diags.Append(err) - if len(diags) != 1 { - t.Fatalf("wrong number of diagnostics in %s; want one warning", diags.ErrWithWarnings()) - } - - if got, want := diags[0].Description().Summary, mp.ValidateProvisionerConfigResponse.Diagnostics[0].Description().Summary; got != want { - t.Fatalf("wrong warning %q; want %q", got, want) - } -} - -func TestEvalValidateProvisioner_connectionInvalid(t *testing.T) { - var p provisioners.Interface = &MockProvisioner{} - ctx := &MockEvalContext{} - ctx.installSimpleEval() - - schema := &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "type": { - Type: cty.String, - Optional: true, - }, - }, - } - - node := &EvalValidateProvisioner{ - ResourceAddr: addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "foo", - Name: "bar", - }, - Provisioner: &p, - Schema: &schema, - Config: &configs.Provisioner{ - Type: "baz", - Config: hcl.EmptyBody(), - Connection: &configs.Connection{ - Config: configs.SynthBody("", map[string]cty.Value{ - "type": cty.StringVal("ssh"), - "bananananananana": cty.StringVal("foo"), - "bazaz": cty.StringVal("bar"), - }), - }, - }, - } - - err := node.Validate(ctx) - if err == nil { - t.Fatalf("node.Eval succeeded; want error") - } - - var diags tfdiags.Diagnostics - diags = diags.Append(err) - if len(diags) != 3 { - t.Fatalf("wrong number of diagnostics; want two errors\n\n%s", diags.Err()) - } - - errStr := diags.Err().Error() - if !(strings.Contains(errStr, "bananananananana") && strings.Contains(errStr, "bazaz")) { - t.Fatalf("wrong errors %q; want something about each of our invalid connInfo keys", errStr) - } -} diff --git a/terraform/node_root_variable.go b/terraform/node_root_variable.go index 63fe81804..5e90e4ad6 100644 --- a/terraform/node_root_variable.go +++ b/terraform/node_root_variable.go @@ -4,6 +4,7 @@ import ( "github.com/hashicorp/terraform/addrs" 
"github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/tfdiags" ) // NodeRootVariable represents a root variable input. @@ -36,7 +37,7 @@ func (n *NodeRootVariable) ReferenceableAddrs() []addrs.Referenceable { } // GraphNodeExecutable -func (n *NodeRootVariable) Execute(ctx EvalContext, op walkOperation) error { +func (n *NodeRootVariable) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics { // We don't actually need to _evaluate_ a root module variable, because // its value is always constant and already stashed away in our EvalContext. // However, we might need to run some user-defined validation rules against diff --git a/terraform/node_root_variable_test.go b/terraform/node_root_variable_test.go index 2a410e212..6546395bd 100644 --- a/terraform/node_root_variable_test.go +++ b/terraform/node_root_variable_test.go @@ -17,9 +17,9 @@ func TestNodeRootVariableExecute(t *testing.T) { }, } - err := n.Execute(ctx, walkApply) - if err != nil { - t.Fatalf("unexpected error: %s", err.Error()) + diags := n.Execute(ctx, walkApply) + if diags.HasErrors() { + t.Fatalf("unexpected error: %s", diags.Err()) } } diff --git a/terraform/plan.go b/terraform/plan.go deleted file mode 100644 index af04c6cd4..000000000 --- a/terraform/plan.go +++ /dev/null @@ -1,122 +0,0 @@ -package terraform - -import ( - "bytes" - "encoding/gob" - "fmt" - "io" - "sync" - - "github.com/zclconf/go-cty/cty" - - "github.com/hashicorp/terraform/configs" -) - -func init() { - gob.Register(make([]interface{}, 0)) - gob.Register(make([]map[string]interface{}, 0)) - gob.Register(make(map[string]interface{})) - gob.Register(make(map[string]string)) -} - -// Plan represents a single Terraform execution plan, which contains -// all the information necessary to make an infrastructure change. -// -// A plan has to contain basically the entire state of the world -// necessary to make a change: the state, diff, config, backend config, etc. 
-// This is so that it can run alone without any other data. -type Plan struct { - // Diff describes the resource actions that must be taken when this - // plan is applied. - Diff *Diff - - // Config represents the entire configuration that was present when this - // plan was created. - Config *configs.Config - - // State is the Terraform state that was current when this plan was - // created. - // - // It is not allowed to apply a plan that has a stale state, since its - // diff could be outdated. - State *State - - // Vars retains the variables that were set when creating the plan, so - // that the same variables can be applied during apply. - Vars map[string]cty.Value - - // Targets, if non-empty, contains a set of resource address strings that - // identify graph nodes that were selected as targets for plan. - // - // When targets are set, any graph node that is not directly targeted or - // indirectly targeted via dependencies is excluded from the graph. - Targets []string - - // TerraformVersion is the version of Terraform that was used to create - // this plan. - // - // It is not allowed to apply a plan created with a different version of - // Terraform, since the other fields of this structure may be interpreted - // in different ways between versions. - TerraformVersion string - - // ProviderSHA256s is a map giving the SHA256 hashes of the exact binaries - // used as plugins for each provider during plan. - // - // These must match between plan and apply to ensure that the diff is - // correctly interpreted, since different provider versions may have - // different attributes or attribute value constraints. - ProviderSHA256s map[string][]byte - - // Backend is the backend that this plan should use and store data with. 
- Backend *BackendState - - // Destroy indicates that this plan was created for a full destroy operation - Destroy bool - - once sync.Once -} - -func (p *Plan) String() string { - buf := new(bytes.Buffer) - buf.WriteString("DIFF:\n\n") - buf.WriteString(p.Diff.String()) - buf.WriteString("\n\nSTATE:\n\n") - buf.WriteString(p.State.String()) - return buf.String() -} - -func (p *Plan) init() { - p.once.Do(func() { - if p.Diff == nil { - p.Diff = new(Diff) - p.Diff.init() - } - - if p.State == nil { - p.State = new(State) - p.State.init() - } - - if p.Vars == nil { - p.Vars = make(map[string]cty.Value) - } - }) -} - -// The format byte is prefixed into the plan file format so that we have -// the ability in the future to change the file format if we want for any -// reason. -const planFormatMagic = "tfplan" -const planFormatVersion byte = 2 - -// ReadPlan reads a plan structure out of a reader in the format that -// was written by WritePlan. -func ReadPlan(src io.Reader) (*Plan, error) { - return nil, fmt.Errorf("terraform.ReadPlan is no longer in use; use planfile.Open instead") -} - -// WritePlan writes a plan somewhere in a binary format. -func WritePlan(d *Plan, dst io.Writer) error { - return fmt.Errorf("terraform.WritePlan is no longer in use; use planfile.Create instead") -} diff --git a/terraform/provider_mock.go b/terraform/provider_mock.go index bf3aca327..b753f3810 100644 --- a/terraform/provider_mock.go +++ b/terraform/provider_mock.go @@ -1,12 +1,14 @@ package terraform import ( - "encoding/json" + "fmt" "sync" "github.com/zclconf/go-cty/cty" ctyjson "github.com/zclconf/go-cty/cty/json" + "github.com/zclconf/go-cty/cty/msgpack" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/configs/hcl2shim" "github.com/hashicorp/terraform/providers" ) @@ -21,34 +23,34 @@ type MockProvider struct { // Anything you want, in case you need to store extra data with the mock. 
Meta interface{} - GetSchemaCalled bool - GetSchemaReturn *ProviderSchema // This is using ProviderSchema directly rather than providers.GetSchemaResponse for compatibility with old tests + GetSchemaCalled bool + GetSchemaResponse *providers.GetSchemaResponse PrepareProviderConfigCalled bool - PrepareProviderConfigResponse providers.PrepareProviderConfigResponse + PrepareProviderConfigResponse *providers.PrepareProviderConfigResponse PrepareProviderConfigRequest providers.PrepareProviderConfigRequest PrepareProviderConfigFn func(providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse ValidateResourceTypeConfigCalled bool ValidateResourceTypeConfigTypeName string - ValidateResourceTypeConfigResponse providers.ValidateResourceTypeConfigResponse + ValidateResourceTypeConfigResponse *providers.ValidateResourceTypeConfigResponse ValidateResourceTypeConfigRequest providers.ValidateResourceTypeConfigRequest ValidateResourceTypeConfigFn func(providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse ValidateDataSourceConfigCalled bool ValidateDataSourceConfigTypeName string - ValidateDataSourceConfigResponse providers.ValidateDataSourceConfigResponse + ValidateDataSourceConfigResponse *providers.ValidateDataSourceConfigResponse ValidateDataSourceConfigRequest providers.ValidateDataSourceConfigRequest ValidateDataSourceConfigFn func(providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse UpgradeResourceStateCalled bool UpgradeResourceStateTypeName string - UpgradeResourceStateResponse providers.UpgradeResourceStateResponse + UpgradeResourceStateResponse *providers.UpgradeResourceStateResponse UpgradeResourceStateRequest providers.UpgradeResourceStateRequest UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse ConfigureCalled bool - ConfigureResponse providers.ConfigureResponse + ConfigureResponse *providers.ConfigureResponse 
ConfigureRequest providers.ConfigureRequest ConfigureFn func(providers.ConfigureRequest) providers.ConfigureResponse @@ -57,30 +59,27 @@ type MockProvider struct { StopResponse error ReadResourceCalled bool - ReadResourceResponse providers.ReadResourceResponse + ReadResourceResponse *providers.ReadResourceResponse ReadResourceRequest providers.ReadResourceRequest ReadResourceFn func(providers.ReadResourceRequest) providers.ReadResourceResponse PlanResourceChangeCalled bool - PlanResourceChangeResponse providers.PlanResourceChangeResponse + PlanResourceChangeResponse *providers.PlanResourceChangeResponse PlanResourceChangeRequest providers.PlanResourceChangeRequest PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse ApplyResourceChangeCalled bool - ApplyResourceChangeResponse providers.ApplyResourceChangeResponse + ApplyResourceChangeResponse *providers.ApplyResourceChangeResponse ApplyResourceChangeRequest providers.ApplyResourceChangeRequest ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse ImportResourceStateCalled bool - ImportResourceStateResponse providers.ImportResourceStateResponse + ImportResourceStateResponse *providers.ImportResourceStateResponse ImportResourceStateRequest providers.ImportResourceStateRequest ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse - // Legacy return type for existing tests, which will be shimmed into an - // ImportResourceStateResponse if set - ImportStateReturn []*InstanceState ReadDataSourceCalled bool - ReadDataSourceResponse providers.ReadDataSourceResponse + ReadDataSourceResponse *providers.ReadDataSourceResponse ReadDataSourceRequest providers.ReadDataSourceRequest ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse @@ -99,32 +98,43 @@ func (p *MockProvider) getSchema() providers.GetSchemaResponse { // This version of getSchema 
doesn't do any locking, so it's suitable to // call from other methods of this mock as long as they are already // holding the lock. + if p.GetSchemaResponse != nil { + return *p.GetSchemaResponse + } - ret := providers.GetSchemaResponse{ + return providers.GetSchemaResponse{ Provider: providers.Schema{}, DataSources: map[string]providers.Schema{}, ResourceTypes: map[string]providers.Schema{}, } - if p.GetSchemaReturn != nil { - ret.Provider.Block = p.GetSchemaReturn.Provider - ret.ProviderMeta.Block = p.GetSchemaReturn.ProviderMeta - for n, s := range p.GetSchemaReturn.DataSources { - ret.DataSources[n] = providers.Schema{ - Block: s, - } - } - for n, s := range p.GetSchemaReturn.ResourceTypes { - ret.ResourceTypes[n] = providers.Schema{ - Version: int64(p.GetSchemaReturn.ResourceTypeSchemaVersions[n]), - Block: s, - } - } - } - - return ret } -func (p *MockProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse { +// ProviderSchema is a helper to convert from the internal GetSchemaResponse to +// a ProviderSchema. 
+func (p *MockProvider) ProviderSchema() *ProviderSchema { + resp := p.getSchema() + + schema := &ProviderSchema{ + Provider: resp.Provider.Block, + ProviderMeta: resp.ProviderMeta.Block, + ResourceTypes: map[string]*configschema.Block{}, + DataSources: map[string]*configschema.Block{}, + ResourceTypeSchemaVersions: map[string]uint64{}, + } + + for resType, s := range resp.ResourceTypes { + schema.ResourceTypes[resType] = s.Block + schema.ResourceTypeSchemaVersions[resType] = uint64(s.Version) + } + + for dataSource, s := range resp.DataSources { + schema.DataSources[dataSource] = s.Block + } + + return schema +} + +func (p *MockProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) (resp providers.PrepareProviderConfigResponse) { p.Lock() defer p.Unlock() @@ -133,43 +143,87 @@ func (p *MockProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRe if p.PrepareProviderConfigFn != nil { return p.PrepareProviderConfigFn(r) } - return p.PrepareProviderConfigResponse + + if p.PrepareProviderConfigResponse != nil { + return *p.PrepareProviderConfigResponse + } + + resp.PreparedConfig = r.Config + return resp } -func (p *MockProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { +func (p *MockProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) (resp providers.ValidateResourceTypeConfigResponse) { p.Lock() defer p.Unlock() p.ValidateResourceTypeConfigCalled = true p.ValidateResourceTypeConfigRequest = r + // Marshall the value to replicate behavior by the GRPC protocol, + // and return any relevant errors + resourceSchema, ok := p.getSchema().ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", r.TypeName)) + return resp + } + + _, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + 
return resp + } + if p.ValidateResourceTypeConfigFn != nil { return p.ValidateResourceTypeConfigFn(r) } - return p.ValidateResourceTypeConfigResponse + if p.ValidateResourceTypeConfigResponse != nil { + return *p.ValidateResourceTypeConfigResponse + } + + return resp } -func (p *MockProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse { +func (p *MockProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) (resp providers.ValidateDataSourceConfigResponse) { p.Lock() defer p.Unlock() p.ValidateDataSourceConfigCalled = true p.ValidateDataSourceConfigRequest = r + // Marshall the value to replicate behavior by the GRPC protocol + dataSchema, ok := p.getSchema().DataSources[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", r.TypeName)) + return resp + } + _, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + if p.ValidateDataSourceConfigFn != nil { return p.ValidateDataSourceConfigFn(r) } - return p.ValidateDataSourceConfigResponse + if p.ValidateDataSourceConfigResponse != nil { + return *p.ValidateDataSourceConfigResponse + } + + return resp } -func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { +func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { p.Lock() defer p.Unlock() - schemas := p.getSchema() - schema := schemas.ResourceTypes[r.TypeName] + schema, ok := p.getSchema().ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", r.TypeName)) + return resp + } + schemaType := schema.Block.ImpliedType() p.UpgradeResourceStateCalled = true @@ -179,31 +233,32 @@ func (p *MockProvider) UpgradeResourceState(r 
providers.UpgradeResourceStateRequ return p.UpgradeResourceStateFn(r) } - resp := p.UpgradeResourceStateResponse - - if resp.UpgradedState == cty.NilVal { - switch { - case r.RawStateFlatmap != nil: - v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType) - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.UpgradedState = v - case len(r.RawStateJSON) > 0: - v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType) - - if err != nil { - resp.Diagnostics = resp.Diagnostics.Append(err) - return resp - } - resp.UpgradedState = v - } + if p.UpgradeResourceStateResponse != nil { + return *p.UpgradeResourceStateResponse } + + switch { + case r.RawStateFlatmap != nil: + v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.UpgradedState = v + case len(r.RawStateJSON) > 0: + v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType) + + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.UpgradedState = v + } + return resp } -func (p *MockProvider) Configure(r providers.ConfigureRequest) providers.ConfigureResponse { +func (p *MockProvider) Configure(r providers.ConfigureRequest) (resp providers.ConfigureResponse) { p.Lock() defer p.Unlock() @@ -214,7 +269,11 @@ func (p *MockProvider) Configure(r providers.ConfigureRequest) providers.Configu return p.ConfigureFn(r) } - return p.ConfigureResponse + if p.ConfigureResponse != nil { + return *p.ConfigureResponse + } + + return resp } func (p *MockProvider) Stop() error { @@ -231,7 +290,7 @@ func (p *MockProvider) Stop() error { return p.StopResponse } -func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse { +func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { p.Lock() defer p.Unlock() @@ -242,24 +301,32 @@ func (p *MockProvider) 
ReadResource(r providers.ReadResourceRequest) providers.R return p.ReadResourceFn(r) } - resp := p.ReadResourceResponse - if resp.NewState != cty.NilVal { - // make sure the NewState fits the schema - // This isn't always the case for the existing tests - newState, err := p.GetSchemaReturn.ResourceTypes[r.TypeName].CoerceValue(resp.NewState) + if p.ReadResourceResponse != nil { + resp = *p.ReadResourceResponse + + // Make sure the NewState conforms to the schema. + // This isn't always the case for the existing tests. + schema, ok := p.getSchema().ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", r.TypeName)) + return resp + } + + newState, err := schema.Block.CoerceValue(resp.NewState) if err != nil { - panic(err) + resp.Diagnostics = resp.Diagnostics.Append(err) } resp.NewState = newState return resp } - // just return the same state we received + // otherwise just return the same state we received resp.NewState = r.PriorState + resp.Private = r.Private return resp } -func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { +func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { p.Lock() defer p.Unlock() @@ -270,10 +337,64 @@ func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) return p.PlanResourceChangeFn(r) } - return p.PlanResourceChangeResponse + if p.PlanResourceChangeResponse != nil { + return *p.PlanResourceChangeResponse + } + + schema, ok := p.getSchema().ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", r.TypeName)) + return resp + } + + // The default plan behavior is to accept the proposed value, and mark all + // nil computed attributes as unknown. 
+ val, err := cty.Transform(r.ProposedNewState, func(path cty.Path, v cty.Value) (cty.Value, error) { + // We're only concerned with known null values, which can be computed + // by the provider. + if !v.IsKnown() { + return v, nil + } + + attrSchema := schema.Block.AttributeByPath(path) + if attrSchema == nil { + // this is an intermediate path which does not represent an attribute + return v, nil + } + + // get the current configuration value, to detect when a + // computed+optional attributes has become unset + configVal, err := path.Apply(r.Config) + if err != nil { + return v, err + } + + switch { + case attrSchema.Computed && !attrSchema.Optional && v.IsNull(): + // this is the easy path, this value is not yet set, and _must_ be computed + return cty.UnknownVal(v.Type()), nil + + case attrSchema.Computed && attrSchema.Optional && !v.IsNull() && configVal.IsNull(): + // If an optional+computed value has gone from set to unset, it + // becomes computed. (this was not possible to do with legacy + // providers) + return cty.UnknownVal(v.Type()), nil + } + + return v, nil + }) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + resp.PlannedPrivate = r.PriorPrivate + resp.PlannedState = val + + return resp } -func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { +func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { p.Lock() p.ApplyResourceChangeCalled = true p.ApplyResourceChangeRequest = r @@ -283,67 +404,93 @@ func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeReques return p.ApplyResourceChangeFn(r) } - return p.ApplyResourceChangeResponse + if p.ApplyResourceChangeResponse != nil { + return *p.ApplyResourceChangeResponse + } + + schema, ok := p.getSchema().ResourceTypes[r.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema 
found for %q", r.TypeName)) + return resp + } + + // if the value is nil, we return that directly to correspond to a delete + if r.PlannedState.IsNull() { + resp.NewState = cty.NullVal(schema.Block.ImpliedType()) + return resp + } + + val, err := schema.Block.CoerceValue(r.PlannedState) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + // the default behavior will be to create the minimal valid apply value by + // setting unknowns (which correspond to computed attributes) to a zero + // value. + val, _ = cty.Transform(val, func(path cty.Path, v cty.Value) (cty.Value, error) { + if !v.IsKnown() { + ty := v.Type() + switch { + case ty == cty.String: + return cty.StringVal(""), nil + case ty == cty.Number: + return cty.NumberIntVal(0), nil + case ty == cty.Bool: + return cty.False, nil + case ty.IsMapType(): + return cty.MapValEmpty(ty.ElementType()), nil + case ty.IsListType(): + return cty.ListValEmpty(ty.ElementType()), nil + default: + return cty.NullVal(ty), nil + } + } + return v, nil + }) + + resp.NewState = val + resp.Private = r.PlannedPrivate + + return resp } -func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { +func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { p.Lock() defer p.Unlock() - if p.ImportStateReturn != nil { - for _, is := range p.ImportStateReturn { - if is.Attributes == nil { - is.Attributes = make(map[string]string) - } - is.Attributes["id"] = is.ID - - typeName := is.Ephemeral.Type - // Use the requested type if the resource has no type of it's own. - // We still return the empty type, which will error, but this prevents a panic. 
- if typeName == "" { - typeName = r.TypeName - } - - schema := p.GetSchemaReturn.ResourceTypes[typeName] - if schema == nil { - panic("no schema found for " + typeName) - } - - private, err := json.Marshal(is.Meta) - if err != nil { - panic(err) - } - - state, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schema.ImpliedType()) - if err != nil { - panic(err) - } - - state, err = schema.CoerceValue(state) - if err != nil { - panic(err) - } - - p.ImportResourceStateResponse.ImportedResources = append( - p.ImportResourceStateResponse.ImportedResources, - providers.ImportedResource{ - TypeName: is.Ephemeral.Type, - State: state, - Private: private, - }) - } - } - p.ImportResourceStateCalled = true p.ImportResourceStateRequest = r if p.ImportResourceStateFn != nil { return p.ImportResourceStateFn(r) } - return p.ImportResourceStateResponse + if p.ImportResourceStateResponse != nil { + resp = *p.ImportResourceStateResponse + // fixup the cty value to match the schema + for i, res := range resp.ImportedResources { + schema, ok := p.getSchema().ResourceTypes[res.TypeName] + if !ok { + resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("no schema found for %q", res.TypeName)) + return resp + } + + var err error + res.State, err = schema.Block.CoerceValue(res.State) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + resp.ImportedResources[i] = res + } + } + + return resp } -func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { +func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { p.Lock() defer p.Unlock() @@ -354,7 +501,11 @@ func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) provide return p.ReadDataSourceFn(r) } - return p.ReadDataSourceResponse + if p.ReadDataSourceResponse != nil { + resp = *p.ReadDataSourceResponse + } + + return resp } func (p *MockProvider) Close() error { diff --git 
a/terraform/resource_provisioner_mock_test.go b/terraform/provisioner_mock_test.go similarity index 89% rename from terraform/resource_provisioner_mock_test.go rename to terraform/provisioner_mock_test.go index 600a9825d..242c09b65 100644 --- a/terraform/resource_provisioner_mock_test.go +++ b/terraform/provisioner_mock_test.go @@ -1,15 +1,9 @@ package terraform import ( - "testing" - "github.com/hashicorp/terraform/provisioners" ) -func TestMockResourceProvisioner_impl(t *testing.T) { - var _ ResourceProvisioner = new(MockResourceProvisioner) -} - // simpleMockProvisioner returns a MockProvisioner that is pre-configured // with schema for its own config, with the same content as returned by // function simpleTestSchema. diff --git a/terraform/reduce_plan.go b/terraform/reduce_plan.go new file mode 100644 index 000000000..097fe6aa3 --- /dev/null +++ b/terraform/reduce_plan.go @@ -0,0 +1,32 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" +) + +// reducePlan takes a planned resource instance change as might be produced by +// Plan or PlanDestroy and "simplifies" it to a single atomic action to be +// performed by a specific graph node. +// +// Callers must specify whether they are a destroy node or a regular apply node. +// If the result is NoOp then the given change requires no action for the +// specific graph node calling this and so evaluation of the that graph node +// should exit early and take no action. +// +// The returned object may either be identical to the input change or a new +// change object derived from the input. Because of the former case, the caller +// must not mutate the object returned in OutChange. 
+func reducePlan(addr addrs.ResourceInstance, in *plans.ResourceInstanceChange, destroy bool) *plans.ResourceInstanceChange { + out := in.Simplify(destroy) + if out.Action != in.Action { + if destroy { + log.Printf("[TRACE] reducePlan: %s change simplified from %s to %s for destroy node", addr, in.Action, out.Action) + } else { + log.Printf("[TRACE] reducePlan: %s change simplified from %s to %s for apply node", addr, in.Action, out.Action) + } + } + return out +} diff --git a/terraform/eval_diff_test.go b/terraform/reduce_plan_test.go similarity index 67% rename from terraform/eval_diff_test.go rename to terraform/reduce_plan_test.go index 8f2a100d1..30d819aad 100644 --- a/terraform/eval_diff_test.go +++ b/terraform/reduce_plan_test.go @@ -92,6 +92,30 @@ func TestProcessIgnoreChangesIndividual(t *testing.T) { "b": cty.StringVal("new b value"), }), }, + "map": { + cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "a0": cty.StringVal("a0 value"), + "a1": cty.StringVal("a1 value"), + }), + "b": cty.StringVal("b value"), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "a0": cty.StringVal("new a0 value"), + "a1": cty.UnknownVal(cty.String), + }), + "b": cty.StringVal("b value"), + }), + []string{`a`}, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "a0": cty.StringVal("a0 value"), + "a1": cty.StringVal("a1 value"), + }), + "b": cty.StringVal("b value"), + }), + }, "map_index": { cty.ObjectVal(map[string]cty.Value{ "a": cty.MapVal(map[string]cty.Value{ @@ -136,6 +160,77 @@ func TestProcessIgnoreChangesIndividual(t *testing.T) { "b": cty.StringVal("b value"), }), }, + "map_index_unknown_value": { + cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "a0": cty.StringVal("a0 value"), + "a1": cty.StringVal("a1 value"), + }), + "b": cty.StringVal("b value"), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "a0": 
cty.StringVal("a0 value"), + "a1": cty.UnknownVal(cty.String), + }), + "b": cty.StringVal("b value"), + }), + []string{`a["a1"]`}, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "a0": cty.StringVal("a0 value"), + "a1": cty.StringVal("a1 value"), + }), + "b": cty.StringVal("b value"), + }), + }, + "map_index_multiple_keys": { + cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "a0": cty.StringVal("a0 value"), + "a1": cty.StringVal("a1 value"), + "a2": cty.StringVal("a2 value"), + "a3": cty.StringVal("a3 value"), + }), + "b": cty.StringVal("b value"), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.Map(cty.String)), + "b": cty.StringVal("new b value"), + }), + []string{`a["a1"]`, `a["a2"]`, `a["a3"]`, `b`}, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "a1": cty.StringVal("a1 value"), + "a2": cty.StringVal("a2 value"), + "a3": cty.StringVal("a3 value"), + }), + "b": cty.StringVal("b value"), + }), + }, + "map_index_redundant": { + cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "a0": cty.StringVal("a0 value"), + "a1": cty.StringVal("a1 value"), + "a2": cty.StringVal("a2 value"), + }), + "b": cty.StringVal("b value"), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.NullVal(cty.Map(cty.String)), + "b": cty.StringVal("new b value"), + }), + []string{`a["a1"]`, `a`, `b`}, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.MapVal(map[string]cty.Value{ + "a0": cty.StringVal("a0 value"), + "a1": cty.StringVal("a1 value"), + "a2": cty.StringVal("a2 value"), + }), + "b": cty.StringVal("b value"), + }), + }, "missing_map_index": { cty.ObjectVal(map[string]cty.Value{ "a": cty.MapVal(map[string]cty.Value{ @@ -250,6 +345,30 @@ func TestProcessIgnoreChangesIndividual(t *testing.T) { "b": cty.StringVal("new b value"), }), }, + "unknown_object_attribute": { + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ 
+ "foo": cty.StringVal("a.foo value"), + "bar": cty.StringVal("a.bar value"), + }), + "b": cty.StringVal("b value"), + }), + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("new a.foo value"), + "bar": cty.UnknownVal(cty.String), + }), + "b": cty.StringVal("new b value"), + }), + []string{"a.bar"}, + cty.ObjectVal(map[string]cty.Value{ + "a": cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("new a.foo value"), + "bar": cty.StringVal("a.bar value"), + }), + "b": cty.StringVal("new b value"), + }), + }, } for name, test := range tests { diff --git a/terraform/resource_provider.go b/terraform/resource_provider.go index dccfec68b..56f47aa76 100644 --- a/terraform/resource_provider.go +++ b/terraform/resource_provider.go @@ -1,226 +1,5 @@ package terraform -// ResourceProvider is a legacy interface for providers. -// -// This is retained only for compatibility with legacy code. The current -// interface for providers is providers.Interface, in the sibling directory -// named "providers". -type ResourceProvider interface { - /********************************************************************* - * Functions related to the provider - *********************************************************************/ - - // ProviderSchema returns the config schema for the main provider - // configuration, as would appear in a "provider" block in the - // configuration files. - // - // Currently not all providers support schema. Callers must therefore - // first call Resources and DataSources and ensure that at least one - // resource or data source has the SchemaAvailable flag set. - GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error) - - // Input was used prior to v0.12 to ask the provider to prompt the user - // for input to complete the configuration. 
- // - // From v0.12 onwards this method is never called because Terraform Core - // is able to handle the necessary input logic itself based on the - // schema returned from GetSchema. - Input(UIInput, *ResourceConfig) (*ResourceConfig, error) - - // Validate is called once at the beginning with the raw configuration - // (no interpolation done) and can return a list of warnings and/or - // errors. - // - // This is called once with the provider configuration only. It may not - // be called at all if no provider configuration is given. - // - // This should not assume that any values of the configurations are valid. - // The primary use case of this call is to check that required keys are - // set. - Validate(*ResourceConfig) ([]string, []error) - - // Configure configures the provider itself with the configuration - // given. This is useful for setting things like access keys. - // - // This won't be called at all if no provider configuration is given. - // - // Configure returns an error if it occurred. - Configure(*ResourceConfig) error - - // Resources returns all the available resource types that this provider - // knows how to manage. - Resources() []ResourceType - - // Stop is called when the provider should halt any in-flight actions. - // - // This can be used to make a nicer Ctrl-C experience for Terraform. - // Even if this isn't implemented to do anything (just returns nil), - // Terraform will still cleanly stop after the currently executing - // graph node is complete. However, this API can be used to make more - // efficient halts. - // - // Stop doesn't have to and shouldn't block waiting for in-flight actions - // to complete. It should take any action it wants and return immediately - // acknowledging it has received the stop request. Terraform core will - // automatically not make any further API calls to the provider soon - // after Stop is called (technically exactly once the currently executing - // graph nodes are complete). 
- // - // The error returned, if non-nil, is assumed to mean that signaling the - // stop somehow failed and that the user should expect potentially waiting - // a longer period of time. - Stop() error - - /********************************************************************* - * Functions related to individual resources - *********************************************************************/ - - // ValidateResource is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per resource. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - ValidateResource(string, *ResourceConfig) ([]string, []error) - - // Apply applies a diff to a specific resource and returns the new - // resource state along with an error. - // - // If the resource state given has an empty ID, then a new resource - // is expected to be created. - Apply( - *InstanceInfo, - *InstanceState, - *InstanceDiff) (*InstanceState, error) - - // Diff diffs a resource versus a desired state and returns - // a diff. - Diff( - *InstanceInfo, - *InstanceState, - *ResourceConfig) (*InstanceDiff, error) - - // Refresh refreshes a resource and updates all of its attributes - // with the latest information. - Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error) - - /********************************************************************* - * Functions related to importing - *********************************************************************/ - - // ImportState requests that the given resource be imported. - // - // The returned InstanceState only requires ID be set. Importing - // will always call Refresh after the state to complete it. 
- // - // IMPORTANT: InstanceState doesn't have the resource type attached - // to it. A type must be specified on the state via the Ephemeral - // field on the state. - // - // This function can return multiple states. Normally, an import - // will map 1:1 to a physical resource. However, some resources map - // to multiple. For example, an AWS security group may contain many rules. - // Each rule is represented by a separate resource in Terraform, - // therefore multiple states are returned. - ImportState(*InstanceInfo, string) ([]*InstanceState, error) - - /********************************************************************* - * Functions related to data resources - *********************************************************************/ - - // ValidateDataSource is called once at the beginning with the raw - // configuration (no interpolation done) and can return a list of warnings - // and/or errors. - // - // This is called once per data source instance. - // - // This should not assume any of the values in the resource configuration - // are valid since it is possible they have to be interpolated still. - // The primary use case of this call is to check that the required keys - // are set and that the general structure is correct. - ValidateDataSource(string, *ResourceConfig) ([]string, []error) - - // DataSources returns all of the available data sources that this - // provider implements. - DataSources() []DataSource - - // ReadDataDiff produces a diff that represents the state that will - // be produced when the given data source is read using a later call - // to ReadDataApply. - ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) - - // ReadDataApply initializes a data instance using the configuration - // in a diff produced by ReadDataDiff. 
- ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) -} - -// ResourceProviderCloser is an interface that providers that can close -// connections that aren't needed anymore must implement. -type ResourceProviderCloser interface { - Close() error -} - -// ResourceType is a type of resource that a resource provider can manage. -type ResourceType struct { - Name string // Name of the resource, example "instance" (no provider prefix) - Importable bool // Whether this resource supports importing - - // SchemaAvailable is set if the provider supports the ProviderSchema, - // ResourceTypeSchema and DataSourceSchema methods. Although it is - // included on each resource type, it's actually a provider-wide setting - // that's smuggled here only because that avoids a breaking change to - // the plugin protocol. - SchemaAvailable bool -} - -// DataSource is a data source that a resource provider implements. -type DataSource struct { - Name string - - // SchemaAvailable is set if the provider supports the ProviderSchema, - // ResourceTypeSchema and DataSourceSchema methods. Although it is - // included on each resource type, it's actually a provider-wide setting - // that's smuggled here only because that avoids a breaking change to - // the plugin protocol. - SchemaAvailable bool -} - -// ResourceProviderFactory is a function type that creates a new instance -// of a resource provider. -type ResourceProviderFactory func() (ResourceProvider, error) - -// ResourceProviderFactoryFixed is a helper that creates a -// ResourceProviderFactory that just returns some fixed provider. 
-func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory { - return func() (ResourceProvider, error) { - return p, nil - } -} - -func ProviderHasResource(p ResourceProvider, n string) bool { - for _, rt := range p.Resources() { - if rt.Name == n { - return true - } - } - - return false -} - -func ProviderHasDataSource(p ResourceProvider, n string) bool { - for _, rt := range p.DataSources() { - if rt.Name == n { - return true - } - } - - return false -} - const errPluginInit = ` Plugin reinitialization required. Please run "terraform init". diff --git a/terraform/resource_provider_mock_test.go b/terraform/resource_provider_mock_test.go index 98134bdcf..3bf73d7f9 100644 --- a/terraform/resource_provider_mock_test.go +++ b/terraform/resource_provider_mock_test.go @@ -1,35 +1,17 @@ package terraform import ( - "testing" - - "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/providers" "github.com/zclconf/go-cty/cty" ) -func TestMockResourceProvider_impl(t *testing.T) { - var _ ResourceProvider = new(MockResourceProvider) - var _ ResourceProviderCloser = new(MockResourceProvider) -} - -// testProviderComponentFactory creates a componentFactory that contains only -// a single given. -func testProviderComponentFactory(name string, provider providers.Interface) *basicComponentFactory { - return &basicComponentFactory{ - providers: map[addrs.Provider]providers.Factory{ - addrs.NewDefaultProvider(name): providers.FactoryFixed(provider), - }, - } -} - // mockProviderWithConfigSchema is a test helper to concisely create a mock // provider with the given schema for its own configuration. 
func mockProviderWithConfigSchema(schema *configschema.Block) *MockProvider { return &MockProvider{ - GetSchemaReturn: &ProviderSchema{ - Provider: schema, + GetSchemaResponse: &providers.GetSchemaResponse{ + Provider: providers.Schema{Block: schema}, }, } } @@ -38,40 +20,81 @@ func mockProviderWithConfigSchema(schema *configschema.Block) *MockProvider { // provider with a schema containing a single resource type. func mockProviderWithResourceTypeSchema(name string, schema *configschema.Block) *MockProvider { return &MockProvider{ - GetSchemaReturn: &ProviderSchema{ - Provider: &configschema.Block{ - Attributes: map[string]*configschema.Attribute{ - "string": { - Type: cty.String, - Optional: true, - }, - "list": { - Type: cty.List(cty.String), - Optional: true, - }, - "root": { - Type: cty.Map(cty.String), - Optional: true, + GetSchemaResponse: &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "string": { + Type: cty.String, + Optional: true, + }, + "list": { + Type: cty.List(cty.String), + Optional: true, + }, + "root": { + Type: cty.Map(cty.String), + Optional: true, + }, }, }, }, - ResourceTypes: map[string]*configschema.Block{ - name: schema, + ResourceTypes: map[string]providers.Schema{ + name: providers.Schema{Block: schema}, }, }, } } -// mockProviderWithDataSourceSchema is a test helper to concisely create a mock -// provider with a schema containing a single data source. -func mockProviderWithDataSourceSchema(name string, schema *configschema.Block) *MockResourceProvider { - return &MockResourceProvider{ - GetSchemaReturn: &ProviderSchema{ - DataSources: map[string]*configschema.Block{ - name: schema, +// mockProviderWithProviderSchema is a test helper to create a mock provider +// from an existing ProviderSchema. 
+func mockProviderWithProviderSchema(providerSchema ProviderSchema) *MockProvider { + p := &MockProvider{ + GetSchemaResponse: &providers.GetSchemaResponse{ + Provider: providers.Schema{ + Block: providerSchema.Provider, }, + ResourceTypes: map[string]providers.Schema{}, + DataSources: map[string]providers.Schema{}, }, } + + for name, schema := range providerSchema.ResourceTypes { + p.GetSchemaResponse.ResourceTypes[name] = providers.Schema{ + Block: schema, + Version: int64(providerSchema.ResourceTypeSchemaVersions[name]), + } + } + + for name, schema := range providerSchema.DataSources { + p.GetSchemaResponse.DataSources[name] = providers.Schema{Block: schema} + } + + return p +} + +// getSchemaResponseFromProviderSchema is a test helper to convert a +// ProviderSchema to a GetSchemaResponse for use when building a mock provider. +func getSchemaResponseFromProviderSchema(providerSchema *ProviderSchema) *providers.GetSchemaResponse { + resp := &providers.GetSchemaResponse{ + Provider: providers.Schema{Block: providerSchema.Provider}, + ProviderMeta: providers.Schema{Block: providerSchema.ProviderMeta}, + ResourceTypes: map[string]providers.Schema{}, + DataSources: map[string]providers.Schema{}, + } + + for name, schema := range providerSchema.ResourceTypes { + resp.ResourceTypes[name] = providers.Schema{ + Block: schema, + Version: int64(providerSchema.ResourceTypeSchemaVersions[name]), + } + } + + for name, schema := range providerSchema.DataSources { + resp.DataSources[name] = providers.Schema{Block: schema} + } + + return resp } // simpleMockProvider returns a MockProvider that is pre-configured @@ -93,13 +116,13 @@ func mockProviderWithDataSourceSchema(name string, schema *configschema.Block) * // objects so that callers can mutate without affecting mock objects. 
func simpleMockProvider() *MockProvider { return &MockProvider{ - GetSchemaReturn: &ProviderSchema{ - Provider: simpleTestSchema(), - ResourceTypes: map[string]*configschema.Block{ - "test_object": simpleTestSchema(), + GetSchemaResponse: &providers.GetSchemaResponse{ + Provider: providers.Schema{Block: simpleTestSchema()}, + ResourceTypes: map[string]providers.Schema{ + "test_object": providers.Schema{Block: simpleTestSchema()}, }, - DataSources: map[string]*configschema.Block{ - "test_object": simpleTestSchema(), + DataSources: map[string]providers.Schema{ + "test_object": providers.Schema{Block: simpleTestSchema()}, }, }, } diff --git a/terraform/schemas.go b/terraform/schemas.go index 15f6d5e7b..b3367201f 100644 --- a/terraform/schemas.go +++ b/terraform/schemas.go @@ -106,7 +106,7 @@ func loadProviderSchemas(schemas map[addrs.Provider]*ProviderSchema, config *con // future calls. schemas[fqn] = &ProviderSchema{} diags = diags.Append( - fmt.Errorf("Failed to instantiate provider %q to obtain schema: %s", name, err), + fmt.Errorf("failed to instantiate provider %q to obtain schema: %s", name, err), ) return } @@ -120,7 +120,7 @@ func loadProviderSchemas(schemas map[addrs.Provider]*ProviderSchema, config *con // future calls. schemas[fqn] = &ProviderSchema{} diags = diags.Append( - fmt.Errorf("Failed to retrieve schema from provider %q: %s", name, resp.Diagnostics.Err()), + fmt.Errorf("failed to retrieve schema from provider %q: %s", name, resp.Diagnostics.Err()), ) return } @@ -200,14 +200,12 @@ func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *conf // future calls. 
schemas[name] = &configschema.Block{} diags = diags.Append( - fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %s", name, err), + fmt.Errorf("failed to instantiate provisioner %q to obtain schema: %s", name, err), ) return } defer func() { - if closer, ok := provisioner.(ResourceProvisionerCloser); ok { - closer.Close() - } + provisioner.Close() }() resp := provisioner.GetSchema() @@ -216,7 +214,7 @@ func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *conf // future calls. schemas[name] = &configschema.Block{} diags = diags.Append( - fmt.Errorf("Failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()), + fmt.Errorf("failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()), ) return } @@ -276,10 +274,3 @@ func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeNam func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) { return ps.SchemaForResourceType(addr.Mode, addr.Type) } - -// ProviderSchemaRequest is used to describe to a ResourceProvider which -// aspects of schema are required, when calling the GetSchema method. 
-type ProviderSchemaRequest struct { - ResourceTypes []string - DataSources []string -} diff --git a/terraform/schemas_test.go b/terraform/schemas_test.go index 06b20f73f..b871884ae 100644 --- a/terraform/schemas_test.go +++ b/terraform/schemas_test.go @@ -8,9 +8,10 @@ import ( func simpleTestSchemas() *Schemas { provider := simpleMockProvider() provisioner := simpleMockProvisioner() + return &Schemas{ Providers: map[addrs.Provider]*ProviderSchema{ - addrs.NewDefaultProvider("test"): provider.GetSchemaReturn, + addrs.NewDefaultProvider("test"): provider.ProviderSchema(), }, Provisioners: map[string]*configschema.Block{ "test": provisioner.GetSchemaResponse.Provisioner, diff --git a/terraform/terraform_test.go b/terraform/terraform_test.go index 96fbcdffe..ffca0ff7b 100644 --- a/terraform/terraform_test.go +++ b/terraform/terraform_test.go @@ -12,12 +12,10 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/zclconf/go-cty/cty" - "github.com/zclconf/go-cty/cty/convert" "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/configs/configload" - "github.com/hashicorp/terraform/helper/experiment" "github.com/hashicorp/terraform/internal/initwd" "github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/providers" @@ -32,19 +30,8 @@ import ( const fixtureDir = "./testdata" func TestMain(m *testing.M) { - // We want to shadow on tests just to make sure the shadow graph works - // in case we need it and to find any race issues. 
- experiment.SetEnabled(experiment.X_shadow, true) - - experiment.Flag(flag.CommandLine) flag.Parse() - // Make sure shadow operations fail our real tests - contextFailOnShadowError = true - - // Always DeepCopy the Diff on every Plan during a test - contextTestDeepCopyOnPlan = true - // We have fmt.Stringer implementations on lots of objects that hide // details that we very often want to see in tests, so we just disable // spew's use of String methods globally on the assumption that spew @@ -55,37 +42,6 @@ func TestMain(m *testing.M) { os.Exit(m.Run()) } -func tempDir(t *testing.T) string { - t.Helper() - - dir, err := ioutil.TempDir("", "tf") - if err != nil { - t.Fatalf("err: %s", err) - } - if err := os.RemoveAll(dir); err != nil { - t.Fatalf("err: %s", err) - } - - return dir -} - -// tempEnv lets you temporarily set an environment variable. It returns -// a function to defer to reset the old value. -// the old value that should be set via a defer. -func tempEnv(t *testing.T, k string, v string) func() { - t.Helper() - - old, oldOk := os.LookupEnv(k) - os.Setenv(k, v) - return func() { - if !oldOk { - os.Unsetenv(k) - } else { - os.Setenv(k, old) - } - } -} - func testModule(t *testing.T, name string) *configs.Config { t.Helper() c, _ := testModuleWithSnapshot(t, name) @@ -208,28 +164,16 @@ func testSetResourceInstanceTainted(module *states.Module, resource, attrsJson, ) } -// testSetResourceInstanceDeposed is a helper function for tests that sets a -// Deposed resource instance for the given module. 
-func testSetResourceInstanceDeposed(module *states.Module, resource, attrsJson, provider string, key states.DeposedKey) { - module.SetResourceInstanceDeposed( - mustResourceInstanceAddr(resource).Resource, - key, - &states.ResourceInstanceObjectSrc{ - Status: states.ObjectTainted, - AttrsJSON: []byte(attrsJson), - }, - mustProviderConfig(provider), - ) -} - func testProviderFuncFixed(rp providers.Interface) providers.Factory { return func() (providers.Interface, error) { return rp, nil } } -func testProvisionerFuncFixed(rp provisioners.Interface) ProvisionerFactory { +func testProvisionerFuncFixed(rp *MockProvisioner) provisioners.Factory { return func() (provisioners.Interface, error) { + // make sure this provisioner has has not been closed + rp.CloseCalled = false return rp, nil } } @@ -266,22 +210,6 @@ func mustProviderConfig(s string) addrs.AbsProviderConfig { return p } -func instanceObjectIdForTests(obj *states.ResourceInstanceObject) string { - v := obj.Value - if v.IsNull() || !v.IsKnown() { - return "" - } - idVal := v.GetAttr("id") - if idVal.IsNull() || !idVal.IsKnown() { - return "" - } - idVal, err := convert.Convert(idVal, cty.String) - if err != nil { - return "" // placeholder value - } - return idVal.AsString() -} - // HookRecordApplyOrder is a test hook that records the order of applies // by recording the PreApply event. type HookRecordApplyOrder struct { @@ -320,59 +248,12 @@ func (h *HookRecordApplyOrder) PreApply(addr addrs.AbsResourceInstance, gen stat // Below are all the constant strings that are the expected output for // various tests. 
-const testTerraformInputProviderStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - bar = override - foo = us-east-1 - type = aws_instance -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - bar = baz - num = 2 - type = aws_instance -` - const testTerraformInputProviderOnlyStr = ` aws_instance.foo: - ID = foo + ID = provider = provider["registry.terraform.io/hashicorp/aws"] foo = us-west-2 - type = aws_instance -` - -const testTerraformInputVarOnlyStr = ` -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - foo = us-east-1 - type = aws_instance -` - -const testTerraformInputVarOnlyUnsetStr = ` -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - bar = baz - foo = foovalue - type = aws_instance -` - -const testTerraformInputVarsStr = ` -aws_instance.bar: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - bar = override - foo = us-east-1 - type = aws_instance -aws_instance.foo: - ID = foo - provider = provider["registry.terraform.io/hashicorp/aws"] - bar = baz - num = 2 - type = aws_instance + type = ` const testTerraformApplyStr = ` @@ -1171,199 +1052,6 @@ aws_instance.bar: type = aws_instance ` -const testTerraformPlanStr = ` -DIFF: - -CREATE: aws_instance.bar - foo: "" => "2" - type: "" => "aws_instance" -CREATE: aws_instance.foo - num: "" => "2" - type: "" => "aws_instance" - -STATE: - - -` - -const testTerraformPlanComputedIdStr = ` -DIFF: - -CREATE: aws_instance.bar - foo: "" => "" - type: "" => "aws_instance" -CREATE: aws_instance.foo - foo: "" => "" - num: "" => "2" - type: "" => "aws_instance" - -STATE: - - -` - -const testTerraformPlanCountIndexZeroStr = ` -DIFF: - -CREATE: aws_instance.foo - foo: "" => "0" - type: "" => "aws_instance" - -STATE: - - -` - -const testTerraformPlanEmptyStr = ` -DIFF: - -CREATE: aws_instance.bar -CREATE: aws_instance.foo - -STATE: - - -` 
- -const testTerraformPlanEscapedVarStr = ` -DIFF: - -CREATE: aws_instance.foo - foo: "" => "bar-${baz}" - type: "" => "aws_instance" - -STATE: - - -` - -const testTerraformPlanModulesStr = ` -DIFF: - -CREATE: aws_instance.bar - foo: "" => "2" - type: "" => "aws_instance" -CREATE: aws_instance.foo - num: "" => "2" - type: "" => "aws_instance" - -module.child: - CREATE: aws_instance.foo - num: "" => "2" - type: "" => "aws_instance" - -STATE: - - -` - -const testTerraformPlanModuleCycleStr = ` -DIFF: - -CREATE: aws_instance.b -CREATE: aws_instance.c - some_input: "" => "" - type: "" => "aws_instance" - -STATE: - - -` - -const testTerraformPlanModuleInputStr = ` -DIFF: - -CREATE: aws_instance.bar - foo: "" => "2" - type: "" => "aws_instance" - -module.child: - CREATE: aws_instance.foo - foo: "" => "42" - type: "" => "aws_instance" - -STATE: - - -` - -const testTerraformPlanModuleInputComputedStr = ` -DIFF: - -CREATE: aws_instance.bar - compute: "" => "foo" - compute_value: "" => "" - foo: "" => "" - type: "" => "aws_instance" - -module.child: - CREATE: aws_instance.foo - foo: "" => "" - type: "" => "aws_instance" - -STATE: - - -` - -const testTerraformPlanModuleVarIntStr = ` -DIFF: - -module.child: - CREATE: aws_instance.foo - num: "" => "2" - type: "" => "aws_instance" - -STATE: - - -` - -const testTerraformPlanMultipleTaintStr = ` -DIFF: - -DESTROY/CREATE: aws_instance.bar - foo: "" => "2" - type: "" => "aws_instance" - -STATE: - -aws_instance.bar: (2 tainted) - ID = - Tainted ID 1 = baz - Tainted ID 2 = zip -aws_instance.foo: - ID = bar - num = 2 -` - -const testTerraformPlanVarMultiCountOneStr = ` -DIFF: - -CREATE: aws_instance.bar - foo: "" => "2" - type: "" => "aws_instance" -CREATE: aws_instance.foo - num: "" => "2" - type: "" => "aws_instance" - -STATE: - - -` - -const testTerraformInputHCL = ` -hcl_instance.hcltest: - ID = foo - provider = provider["registry.terraform.io/hashicorp/hcl"] - bar.w = z - bar.x = y - foo.# = 2 - foo.0 = a - foo.1 = b - type = 
hcl_instance -` - const testTerraformRefreshDataRefDataStr = ` data.null_data_source.bar: ID = foo diff --git a/terraform/testdata/apply-data-sensitive/main.tf b/terraform/testdata/apply-data-sensitive/main.tf new file mode 100644 index 000000000..c248a7c33 --- /dev/null +++ b/terraform/testdata/apply-data-sensitive/main.tf @@ -0,0 +1,8 @@ +variable "foo" { + sensitive = true + default = "foo" +} + +data "null_data_source" "testing" { + foo = var.foo +} diff --git a/terraform/testdata/apply-destroy-data-cycle/main.tf b/terraform/testdata/apply-destroy-data-cycle/main.tf index bd72a47e3..591af8200 100644 --- a/terraform/testdata/apply-destroy-data-cycle/main.tf +++ b/terraform/testdata/apply-destroy-data-cycle/main.tf @@ -8,3 +8,7 @@ data "null_data_source" "d" { resource "null_resource" "a" { count = local.l == "NONE" ? 1 : 0 } + +provider "test" { + foo = data.null_data_source.d.id +} diff --git a/terraform/testdata/apply-destroy-data-resource/main.tf b/terraform/testdata/apply-destroy-data-resource/main.tf index cb16d9f34..0d941a707 100644 --- a/terraform/testdata/apply-destroy-data-resource/main.tf +++ b/terraform/testdata/apply-destroy-data-resource/main.tf @@ -1,5 +1,3 @@ data "null_data_source" "testing" { - inputs = { - test = "yes" - } + foo = "yes" } diff --git a/terraform/testdata/apply-good-create-before-count/main.tf b/terraform/testdata/apply-good-create-before-count/main.tf deleted file mode 100644 index 324ad5285..000000000 --- a/terraform/testdata/apply-good-create-before-count/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "bar" { - count = 2 - require_new = "xyz" - lifecycle { - create_before_destroy = true - } -} diff --git a/terraform/testdata/apply-provisioner-conninfo/main.tf b/terraform/testdata/apply-provisioner-conninfo/main.tf deleted file mode 100644 index 5166d22ba..000000000 --- a/terraform/testdata/apply-provisioner-conninfo/main.tf +++ /dev/null @@ -1,29 +0,0 @@ -variable "pass" { -} - -variable "value" { -} - 
-resource "aws_instance" "foo" { - num = "2" - compute = "value" - compute_value = "${var.value}" -} - -resource "aws_instance" "bar" { - connection { - host = "localhost" - type = "telnet" - } - - provisioner "shell" { - foo = "${aws_instance.foo.value}" - connection { - host = "localhost" - type = "telnet" - user = "superuser" - port = 2222 - password = "${var.pass}" - } - } -} diff --git a/terraform/testdata/apply-provisioner-each/main.tf b/terraform/testdata/apply-provisioner-each/main.tf deleted file mode 100644 index 29be7206e..000000000 --- a/terraform/testdata/apply-provisioner-each/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "bar" { - for_each = toset(["a"]) - provisioner "shell" { - when = "destroy" - command = "echo ${each.key}" - } -} diff --git a/terraform/testdata/apply-resource-scale-in/main.tf b/terraform/testdata/apply-resource-scale-in/main.tf index 0363d89b7..8cb38473e 100644 --- a/terraform/testdata/apply-resource-scale-in/main.tf +++ b/terraform/testdata/apply-resource-scale-in/main.tf @@ -5,7 +5,7 @@ resource "aws_instance" "one" { } locals { - one_id = element(concat(aws_instance.one.*.id, list("")), 0) + one_id = element(concat(aws_instance.one.*.id, [""]), 0) } resource "aws_instance" "two" { diff --git a/terraform/testdata/empty-with-child-module/child/child.tf b/terraform/testdata/empty-with-child-module/child/child.tf deleted file mode 100644 index 05e29577e..000000000 --- a/terraform/testdata/empty-with-child-module/child/child.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "grandchild" { - source = "../grandchild" -} diff --git a/terraform/testdata/empty-with-child-module/grandchild/grandchild.tf b/terraform/testdata/empty-with-child-module/grandchild/grandchild.tf deleted file mode 100644 index 4b41c9fcf..000000000 --- a/terraform/testdata/empty-with-child-module/grandchild/grandchild.tf +++ /dev/null @@ -1 +0,0 @@ -# Nothing here! 
diff --git a/terraform/testdata/graph-builder-basic/main.tf b/terraform/testdata/graph-builder-basic/main.tf deleted file mode 100644 index add0dd43f..000000000 --- a/terraform/testdata/graph-builder-basic/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -provider "aws" {} -resource "aws_instance" "db" {} -resource "aws_instance" "web" { - foo = "${aws_instance.db.id}" -} diff --git a/terraform/testdata/graph-builder-cbd-non-cbd/main.tf b/terraform/testdata/graph-builder-cbd-non-cbd/main.tf deleted file mode 100644 index f478d4f33..000000000 --- a/terraform/testdata/graph-builder-cbd-non-cbd/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -provider "aws" {} - -resource "aws_lc" "foo" {} - -resource "aws_asg" "foo" { - lc = "${aws_lc.foo.id}" - - lifecycle { create_before_destroy = true } -} diff --git a/terraform/testdata/graph-builder-modules/consul/main.tf b/terraform/testdata/graph-builder-modules/consul/main.tf deleted file mode 100644 index 8b0469b8c..000000000 --- a/terraform/testdata/graph-builder-modules/consul/main.tf +++ /dev/null @@ -1,2 +0,0 @@ -provider "aws" {} -resource "aws_instance" "server" {} diff --git a/terraform/testdata/graph-builder-modules/main.tf b/terraform/testdata/graph-builder-modules/main.tf deleted file mode 100644 index 8e8b532dd..000000000 --- a/terraform/testdata/graph-builder-modules/main.tf +++ /dev/null @@ -1,16 +0,0 @@ -module "consul" { - foo = "${aws_security_group.firewall.foo}" - source = "./consul" -} - -provider "aws" {} - -resource "aws_security_group" "firewall" {} - -resource "aws_instance" "web" { - security_groups = [ - "foo", - "${aws_security_group.firewall.foo}", - "${module.consul.security_group}" - ] -} diff --git a/terraform/testdata/graph-builder-multi-level-module/foo/bar/main.tf b/terraform/testdata/graph-builder-multi-level-module/foo/bar/main.tf deleted file mode 100644 index 6ee0b2889..000000000 --- a/terraform/testdata/graph-builder-multi-level-module/foo/bar/main.tf +++ /dev/null @@ -1,2 +0,0 @@ -variable "bar" {} -output 
"value" { value = "${var.bar}" } diff --git a/terraform/testdata/graph-builder-multi-level-module/foo/main.tf b/terraform/testdata/graph-builder-multi-level-module/foo/main.tf deleted file mode 100644 index dbe120fb4..000000000 --- a/terraform/testdata/graph-builder-multi-level-module/foo/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -module "bar" { - source = "./bar" - bar = "${var.foo}" -} - -variable "foo" {} diff --git a/terraform/testdata/graph-builder-multi-level-module/main.tf b/terraform/testdata/graph-builder-multi-level-module/main.tf deleted file mode 100644 index 3962c1d14..000000000 --- a/terraform/testdata/graph-builder-multi-level-module/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -module "foo" { - source = "./foo" - foo = "bar" -} diff --git a/terraform/testdata/graph-builder-orphan-deps/main.tf b/terraform/testdata/graph-builder-orphan-deps/main.tf deleted file mode 100644 index b21d3b6ab..000000000 --- a/terraform/testdata/graph-builder-orphan-deps/main.tf +++ /dev/null @@ -1 +0,0 @@ -provider "aws" {} diff --git a/terraform/testdata/graph-count-var-resource/main.tf b/terraform/testdata/graph-count-var-resource/main.tf deleted file mode 100644 index 9c7407fa5..000000000 --- a/terraform/testdata/graph-count-var-resource/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "aws_instance" "foo" {} - -resource "aws_instance" "web" { - count = "${aws_instance.foo.bar}" -} - -resource "aws_load_balancer" "weblb" { - members = "${aws_instance.web.*.id}" -} diff --git a/terraform/testdata/graph-count/main.tf b/terraform/testdata/graph-count/main.tf deleted file mode 100644 index c6fdf97e4..000000000 --- a/terraform/testdata/graph-count/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "web" { - count = 3 -} - -resource "aws_load_balancer" "weblb" { - members = "${aws_instance.web.*.id}" -} diff --git a/terraform/testdata/graph-cycle/main.tf b/terraform/testdata/graph-cycle/main.tf deleted file mode 100644 index 1f7a3a763..000000000 --- 
a/terraform/testdata/graph-cycle/main.tf +++ /dev/null @@ -1,18 +0,0 @@ -variable "foo" { - default = "bar" - description = "bar" -} - -provider "aws" { - foo = "${aws_security_group.firewall.value}" -} - -resource "aws_security_group" "firewall" {} - -resource "aws_instance" "web" { - ami = "${var.foo}" - security_groups = [ - "foo", - "${aws_security_group.firewall.foo}" - ] -} diff --git a/terraform/testdata/graph-depends-on-count/main.tf b/terraform/testdata/graph-depends-on-count/main.tf deleted file mode 100644 index 7e005f172..000000000 --- a/terraform/testdata/graph-depends-on-count/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -resource "aws_instance" "web" {} - -resource "aws_instance" "db" { - depends_on = ["aws_instance.web"] - count = 2 -} diff --git a/terraform/testdata/graph-depends-on/main.tf b/terraform/testdata/graph-depends-on/main.tf deleted file mode 100644 index 5a5430917..000000000 --- a/terraform/testdata/graph-depends-on/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "web" {} - -resource "aws_instance" "db" { - depends_on = ["aws_instance.web"] -} diff --git a/terraform/testdata/graph-diff-create-before/main.tf b/terraform/testdata/graph-diff-create-before/main.tf deleted file mode 100644 index 2cfe794d1..000000000 --- a/terraform/testdata/graph-diff-create-before/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -provider "aws" {} - -resource "aws_instance" "bar" { - ami = "abc" - lifecycle { - create_before_destroy = true - } -} diff --git a/terraform/testdata/graph-diff-destroy/main.tf b/terraform/testdata/graph-diff-destroy/main.tf deleted file mode 100644 index 1df2421fc..000000000 --- a/terraform/testdata/graph-diff-destroy/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -provider "aws" {} - -resource "aws_instance" "foo" { -} - -resource "aws_instance" "bar" { - foo = "${aws_instance.foo.id}" -} diff --git a/terraform/testdata/graph-diff-module-dep-module/bar/main.tf b/terraform/testdata/graph-diff-module-dep-module/bar/main.tf deleted file mode 
100644 index 6f71b621f..000000000 --- a/terraform/testdata/graph-diff-module-dep-module/bar/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -variable "in" {} - -aws_instance "foo" {} diff --git a/terraform/testdata/graph-diff-module-dep-module/foo/main.tf b/terraform/testdata/graph-diff-module-dep-module/foo/main.tf deleted file mode 100644 index 2bf29d59e..000000000 --- a/terraform/testdata/graph-diff-module-dep-module/foo/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -output "data" { - value = "foo" -} - -aws_instance "foo" {} diff --git a/terraform/testdata/graph-diff-module-dep-module/main.tf b/terraform/testdata/graph-diff-module-dep-module/main.tf deleted file mode 100644 index 656503f28..000000000 --- a/terraform/testdata/graph-diff-module-dep-module/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -module "foo" { - source = "./foo" -} - -module "bar" { - source = "./bar" - in = "${module.foo.data}" -} diff --git a/terraform/testdata/graph-diff-module-dep/child/main.tf b/terraform/testdata/graph-diff-module-dep/child/main.tf deleted file mode 100644 index 84d1de905..000000000 --- a/terraform/testdata/graph-diff-module-dep/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "foo" {} - -output "bar" { - value = "baz" -} diff --git a/terraform/testdata/graph-diff-module-dep/main.tf b/terraform/testdata/graph-diff-module-dep/main.tf deleted file mode 100644 index 2f61386b2..000000000 --- a/terraform/testdata/graph-diff-module-dep/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" {} - -module "child" { - source = "./child" - in = "${aws_instance.foo.id}" -} - - diff --git a/terraform/testdata/graph-diff-module/child/main.tf b/terraform/testdata/graph-diff-module/child/main.tf deleted file mode 100644 index 84d1de905..000000000 --- a/terraform/testdata/graph-diff-module/child/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "foo" {} - -output "bar" { - value = "baz" -} diff --git a/terraform/testdata/graph-diff-module/main.tf 
b/terraform/testdata/graph-diff-module/main.tf deleted file mode 100644 index 2b823f117..000000000 --- a/terraform/testdata/graph-diff-module/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -module "child" { - source = "./child" -} - -resource "aws_instance" "foo" { - value = "${module.child.bar}" -} diff --git a/terraform/testdata/graph-diff/main.tf b/terraform/testdata/graph-diff/main.tf deleted file mode 100644 index b626e60c8..000000000 --- a/terraform/testdata/graph-diff/main.tf +++ /dev/null @@ -1,2 +0,0 @@ -resource "aws_instance" "foo" { -} diff --git a/terraform/testdata/graph-missing-deps/main.tf b/terraform/testdata/graph-missing-deps/main.tf deleted file mode 100644 index 44297f318..000000000 --- a/terraform/testdata/graph-missing-deps/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "db" {} - -resource "aws_instance" "web" { - foo = "${aws_instance.lb.id}" -} diff --git a/terraform/testdata/graph-module-orphan/main.tf b/terraform/testdata/graph-module-orphan/main.tf deleted file mode 100644 index 307463d30..000000000 --- a/terraform/testdata/graph-module-orphan/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -provider "aws" {} - -resource "aws_security_group" "firewall" {} - -resource "aws_instance" "web" { - security_groups = [ - "foo", - "${aws_security_group.firewall.foo}" - ] -} diff --git a/terraform/testdata/graph-modules/consul/main.tf b/terraform/testdata/graph-modules/consul/main.tf deleted file mode 100644 index 9e22d04d8..000000000 --- a/terraform/testdata/graph-modules/consul/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "server" {} - -output "security_group" { value = "" } diff --git a/terraform/testdata/graph-modules/main.tf b/terraform/testdata/graph-modules/main.tf deleted file mode 100644 index 8e8b532dd..000000000 --- a/terraform/testdata/graph-modules/main.tf +++ /dev/null @@ -1,16 +0,0 @@ -module "consul" { - foo = "${aws_security_group.firewall.foo}" - source = "./consul" -} - -provider "aws" {} - -resource 
"aws_security_group" "firewall" {} - -resource "aws_instance" "web" { - security_groups = [ - "foo", - "${aws_security_group.firewall.foo}", - "${module.consul.security_group}" - ] -} diff --git a/terraform/testdata/graph-node-module-expand/child/main.tf b/terraform/testdata/graph-node-module-expand/child/main.tf deleted file mode 100644 index f14f189b0..000000000 --- a/terraform/testdata/graph-node-module-expand/child/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -resource "aws_instance" "foo" {} -resource "aws_instance" "bar" { - var = "${aws_instance.foo.whatever}" -} diff --git a/terraform/testdata/graph-node-module-expand/main.tf b/terraform/testdata/graph-node-module-expand/main.tf deleted file mode 100644 index 0f6991c53..000000000 --- a/terraform/testdata/graph-node-module-expand/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "child" { - source = "./child" -} diff --git a/terraform/testdata/graph-node-module-flatten/child/main.tf b/terraform/testdata/graph-node-module-flatten/child/main.tf deleted file mode 100644 index 919f140bb..000000000 --- a/terraform/testdata/graph-node-module-flatten/child/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "foo" {} diff --git a/terraform/testdata/graph-node-module-flatten/main.tf b/terraform/testdata/graph-node-module-flatten/main.tf deleted file mode 100644 index 0f6991c53..000000000 --- a/terraform/testdata/graph-node-module-flatten/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "child" { - source = "./child" -} diff --git a/terraform/testdata/graph-outputs/main.tf b/terraform/testdata/graph-outputs/main.tf deleted file mode 100644 index 92c4bf226..000000000 --- a/terraform/testdata/graph-outputs/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "foo" {} - -output "foo" { - value = "${aws_instance.foo.value}" -} diff --git a/terraform/testdata/graph-provider-alias/main.tf b/terraform/testdata/graph-provider-alias/main.tf deleted file mode 100644 index f7c319fc5..000000000 --- 
a/terraform/testdata/graph-provider-alias/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -provider "aws" { -} - -provider "aws" { - alias = "foo" -} - -provider "aws" { - alias = "bar" -} \ No newline at end of file diff --git a/terraform/testdata/graph-provider-prune/main.tf b/terraform/testdata/graph-provider-prune/main.tf deleted file mode 100644 index ac2f526fa..000000000 --- a/terraform/testdata/graph-provider-prune/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -provider "aws" {} -provider "digitalocean" {} -provider "openstack" {} - -resource "aws_load_balancer" "weblb" {} diff --git a/terraform/testdata/graph-provisioners/main.tf b/terraform/testdata/graph-provisioners/main.tf deleted file mode 100644 index 6e1e93aac..000000000 --- a/terraform/testdata/graph-provisioners/main.tf +++ /dev/null @@ -1,33 +0,0 @@ -variable "foo" { - default = "bar" - description = "bar" -} - -provider "aws" {} - -resource "aws_security_group" "firewall" {} - -resource "aws_instance" "web" { - ami = "${var.foo}" - security_groups = [ - "foo", - "${aws_security_group.firewall.foo}" - ] - provisioner "winrm" { - cmd = "echo foo" - } - provisioner "winrm" { - cmd = "echo bar" - } -} - -resource "aws_load_balancer" "weblb" { - provisioner "shell" { - cmd = "add ${aws_instance.web.id}" - connection { - host = "localhost" - type = "magic" - user = "${aws_security_group.firewall.id}" - } - } -} diff --git a/terraform/testdata/graph-resource-expand-prov-deps/main.tf b/terraform/testdata/graph-resource-expand-prov-deps/main.tf deleted file mode 100644 index a8c8efd85..000000000 --- a/terraform/testdata/graph-resource-expand-prov-deps/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "web" { - count = 3 - - provisioner "remote-exec" { - inline = ["echo ${aws_instance.web.0.foo}"] - } -} diff --git a/terraform/testdata/graph-resource-expand/main.tf b/terraform/testdata/graph-resource-expand/main.tf deleted file mode 100644 index b00b04eff..000000000 --- 
a/terraform/testdata/graph-resource-expand/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "web" { - count = 3 -} diff --git a/terraform/testdata/graph-tainted/main.tf b/terraform/testdata/graph-tainted/main.tf deleted file mode 100644 index da7eb0a79..000000000 --- a/terraform/testdata/graph-tainted/main.tf +++ /dev/null @@ -1,18 +0,0 @@ -variable "foo" { - default = "bar" - description = "bar" -} - -provider "aws" { - foo = "${openstack_floating_ip.random.value}" -} - -resource "aws_security_group" "firewall" {} - -resource "aws_instance" "web" { - ami = "${var.foo}" - security_groups = [ - "foo", - "${aws_security_group.firewall.foo}" - ] -} diff --git a/terraform/testdata/import-provider-alias/main.tf b/terraform/testdata/import-provider-alias/main.tf deleted file mode 100644 index d145d088e..000000000 --- a/terraform/testdata/import-provider-alias/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -provider "aws" { - foo = "bar" - alias = "alias" -} diff --git a/terraform/testdata/input-bad-var-default/main.tf b/terraform/testdata/input-bad-var-default/main.tf deleted file mode 100644 index bc34bb8e9..000000000 --- a/terraform/testdata/input-bad-var-default/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "test" { - default = { - l = [1, 2, 3] - } -} diff --git a/terraform/testdata/input-hcl/main.tf b/terraform/testdata/input-hcl/main.tf deleted file mode 100644 index ca46ee8e9..000000000 --- a/terraform/testdata/input-hcl/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -variable "mapped" { - type = "map" -} - -variable "listed" { - type = "list" -} - -resource "hcl_instance" "hcltest" { - foo = "${var.listed}" - bar = "${var.mapped}" -} diff --git a/terraform/testdata/input-module-computed-output-element/main.tf b/terraform/testdata/input-module-computed-output-element/main.tf deleted file mode 100644 index bb96e24a3..000000000 --- a/terraform/testdata/input-module-computed-output-element/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -module "b" { - source = "./modb" -} - 
-module "a" { - source = "./moda" - - single_element = "${element(module.b.computed_list, 0)}" -} diff --git a/terraform/testdata/input-module-computed-output-element/moda/main.tf b/terraform/testdata/input-module-computed-output-element/moda/main.tf deleted file mode 100644 index eb09eb192..000000000 --- a/terraform/testdata/input-module-computed-output-element/moda/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -variable "single_element" { - type = "string" -} diff --git a/terraform/testdata/input-module-computed-output-element/modb/main.tf b/terraform/testdata/input-module-computed-output-element/modb/main.tf deleted file mode 100644 index ebe4a7eff..000000000 --- a/terraform/testdata/input-module-computed-output-element/modb/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "test" { - count = 3 -} - -output "computed_list" { - value = ["${aws_instance.test.*.id}"] -} diff --git a/terraform/testdata/input-variables-invalid/main.tf b/terraform/testdata/input-variables-invalid/main.tf deleted file mode 100644 index 9d6d49aa3..000000000 --- a/terraform/testdata/input-variables-invalid/main.tf +++ /dev/null @@ -1,30 +0,0 @@ -# Required -variable "foo" { -} - -# Optional -variable "bar" { - default = "baz" -} - -# Mapping -variable "map" { - default = { - foo = "bar" - } -} - -# Complex Object Types -variable "object_map" { - type = map(object({ - foo = string, - bar = any - })) -} - -variable "object_list" { - type = list(object({ - foo = string, - bar = any - })) -} diff --git a/terraform/testdata/input-variables-invalid/terraform.tfvars b/terraform/testdata/input-variables-invalid/terraform.tfvars deleted file mode 100644 index 4ebc83913..000000000 --- a/terraform/testdata/input-variables-invalid/terraform.tfvars +++ /dev/null @@ -1,13 +0,0 @@ -test = [ - { - foo = "blah1" - bar = {} - }, - { - foo = "blah2" - bar = { - var1 = "val1", - var2 = "var2" - } - } -] diff --git a/terraform/testdata/interpolate-local/main.tf 
b/terraform/testdata/interpolate-local/main.tf deleted file mode 100644 index 699667a14..000000000 --- a/terraform/testdata/interpolate-local/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -locals { - foo = "..." -} diff --git a/terraform/testdata/interpolate-multi-interp/main.tf b/terraform/testdata/interpolate-multi-interp/main.tf deleted file mode 100644 index 1d475e2ee..000000000 --- a/terraform/testdata/interpolate-multi-interp/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "web" { - count = "${var.c}" -} diff --git a/terraform/testdata/interpolate-multi-vars/main.tf b/terraform/testdata/interpolate-multi-vars/main.tf deleted file mode 100644 index b24d02f98..000000000 --- a/terraform/testdata/interpolate-multi-vars/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_route53_zone" "yada" { - -} - -resource "aws_route53_zone" "terra" { - count = 2 -} diff --git a/terraform/testdata/interpolate-path-module/child/main.tf b/terraform/testdata/interpolate-path-module/child/main.tf deleted file mode 100644 index e69de29bb..000000000 diff --git a/terraform/testdata/interpolate-path-module/main.tf b/terraform/testdata/interpolate-path-module/main.tf deleted file mode 100644 index 0f6991c53..000000000 --- a/terraform/testdata/interpolate-path-module/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "child" { - source = "./child" -} diff --git a/terraform/testdata/interpolate-resource-variable-multi/main.tf b/terraform/testdata/interpolate-resource-variable-multi/main.tf deleted file mode 100644 index b00b04eff..000000000 --- a/terraform/testdata/interpolate-resource-variable-multi/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "web" { - count = 3 -} diff --git a/terraform/testdata/interpolate-resource-variable/main.tf b/terraform/testdata/interpolate-resource-variable/main.tf deleted file mode 100644 index 64cbf6236..000000000 --- a/terraform/testdata/interpolate-resource-variable/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "web" {} 
diff --git a/terraform/testdata/module-deps-explicit-provider-resource/main.tf b/terraform/testdata/module-deps-explicit-provider-resource/main.tf deleted file mode 100644 index d5990b09c..000000000 --- a/terraform/testdata/module-deps-explicit-provider-resource/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -provider "foo" { - version = ">=1.0.0" -} - -resource "foo_bar" "test1" { - -} diff --git a/terraform/testdata/module-deps-explicit-provider-unconstrained/main.tf b/terraform/testdata/module-deps-explicit-provider-unconstrained/main.tf deleted file mode 100644 index 6144ff539..000000000 --- a/terraform/testdata/module-deps-explicit-provider-unconstrained/main.tf +++ /dev/null @@ -1,2 +0,0 @@ -provider "foo" { -} diff --git a/terraform/testdata/module-deps-explicit-provider/main.tf b/terraform/testdata/module-deps-explicit-provider/main.tf deleted file mode 100644 index 27d423759..000000000 --- a/terraform/testdata/module-deps-explicit-provider/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -provider "foo" { - version = ">=1.0.0" -} - -provider "foo" { - version = ">=2.0.0" - alias = "bar" -} diff --git a/terraform/testdata/module-deps-implicit-provider/main.tf b/terraform/testdata/module-deps-implicit-provider/main.tf deleted file mode 100644 index 15aa2cb72..000000000 --- a/terraform/testdata/module-deps-implicit-provider/main.tf +++ /dev/null @@ -1,8 +0,0 @@ - -resource "foo_bar" "test1" { - -} - -resource "foo_bar" "test2" { - provider = "foo.baz" -} diff --git a/terraform/testdata/module-deps-inherit-provider/child/child.tf b/terraform/testdata/module-deps-inherit-provider/child/child.tf deleted file mode 100644 index 51e0950a0..000000000 --- a/terraform/testdata/module-deps-inherit-provider/child/child.tf +++ /dev/null @@ -1,17 +0,0 @@ - -# "foo" is inherited from the parent module -resource "foo_bar" "test" { - -} - -# but we don't use the "bar" provider inherited from the parent - -# "baz" is introduced here for the first time, so it's an implicit -# dependency -resource 
"baz_bar" "test" { - -} - -module "grandchild" { - source = "../grandchild" -} diff --git a/terraform/testdata/module-deps-inherit-provider/grandchild/grandchild.tf b/terraform/testdata/module-deps-inherit-provider/grandchild/grandchild.tf deleted file mode 100644 index c5a07249f..000000000 --- a/terraform/testdata/module-deps-inherit-provider/grandchild/grandchild.tf +++ /dev/null @@ -1,11 +0,0 @@ - -# Here we *override* the foo from the parent -provider "foo" { - -} - -# We also use the "bar" provider defined at the root, which was -# completely ignored by the child module in between. -resource "bar_thing" "test" { - -} diff --git a/terraform/testdata/module-deps-inherit-provider/main.tf b/terraform/testdata/module-deps-inherit-provider/main.tf deleted file mode 100644 index 9842855b9..000000000 --- a/terraform/testdata/module-deps-inherit-provider/main.tf +++ /dev/null @@ -1,11 +0,0 @@ - -provider "foo" { -} - -provider "bar" { - -} - -module "child" { - source = "./child" -} diff --git a/terraform/testdata/module-deps-required-providers/main.tf b/terraform/testdata/module-deps-required-providers/main.tf deleted file mode 100644 index e39cc897b..000000000 --- a/terraform/testdata/module-deps-required-providers/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -terraform { - required_providers { - foo = { - version = ">=1.0.0" - } - } -} diff --git a/terraform/testdata/new-good/main.tf b/terraform/testdata/new-good/main.tf deleted file mode 100644 index 40ed1a89c..000000000 --- a/terraform/testdata/new-good/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -provider "aws" { - foo = "bar" -} - -resource "aws_instance" "foo" {} -resource "do_droplet" "bar" {} diff --git a/terraform/testdata/new-graph-cycle/main.tf b/terraform/testdata/new-graph-cycle/main.tf deleted file mode 100644 index b4285424f..000000000 --- a/terraform/testdata/new-graph-cycle/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -resource "aws_instance" "foo" { - ami = "${aws_instance.bar.id}" -} - -resource "aws_instance" "bar" { 
- ami = "${aws_instance.foo.id}" -} diff --git a/terraform/testdata/new-pc-cache/main.tf b/terraform/testdata/new-pc-cache/main.tf deleted file mode 100644 index 617fd43bf..000000000 --- a/terraform/testdata/new-pc-cache/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -provider "aws" { - foo = "bar" -} - -provider "aws_elb" { - foo = "baz" -} - -resource "aws_instance" "foo" {} -resource "aws_instance" "bar" {} -resource "aws_elb" "lb" {} -resource "do_droplet" "bar" {} diff --git a/terraform/testdata/new-provider-validate/main.tf b/terraform/testdata/new-provider-validate/main.tf deleted file mode 100644 index 9ba300bad..000000000 --- a/terraform/testdata/new-provider-validate/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -provider "aws" { - foo = "bar" -} - -resource "aws_instance" "foo" {} diff --git a/terraform/testdata/new-variables/main.tf b/terraform/testdata/new-variables/main.tf deleted file mode 100644 index 8a42d5cda..000000000 --- a/terraform/testdata/new-variables/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -variable "foo" {} -variable "bar" { - default = "baz" -} diff --git a/terraform/testdata/plan-data-source-type-mismatch/main.tf b/terraform/testdata/plan-data-source-type-mismatch/main.tf deleted file mode 100644 index d0782f261..000000000 --- a/terraform/testdata/plan-data-source-type-mismatch/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -data "aws_availability_zones" "azs" {} -resource "aws_instance" "foo" { - ami = "${data.aws_availability_zones.azs.names}" -} diff --git a/terraform/testdata/plan-for-each/main.tf b/terraform/testdata/plan-for-each/main.tf index bffb079cb..94572e20a 100644 --- a/terraform/testdata/plan-for-each/main.tf +++ b/terraform/testdata/plan-for-each/main.tf @@ -13,7 +13,7 @@ resource "aws_instance" "bar" { for_each = toset([]) } resource "aws_instance" "bar2" { - for_each = toset(list("z", "y", "x")) + for_each = toset(["z", "y", "x"]) } # an empty map should generate no resource diff --git a/terraform/testdata/plan-module-destroy-gh-1835/b/main.tf 
b/terraform/testdata/plan-module-destroy-gh-1835/b/main.tf index c3b0270b0..3b0cc6664 100644 --- a/terraform/testdata/plan-module-destroy-gh-1835/b/main.tf +++ b/terraform/testdata/plan-module-destroy-gh-1835/b/main.tf @@ -1,5 +1,5 @@ variable "a_id" {} resource "aws_instance" "b" { - command = "echo ${var.a_id}" + foo = "echo ${var.a_id}" } diff --git a/terraform/testdata/plan-provider-init/main.tf b/terraform/testdata/plan-provider-init/main.tf deleted file mode 100644 index ca800ad7b..000000000 --- a/terraform/testdata/plan-provider-init/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -provider "do" { - foo = "${aws_instance.foo.num}" -} - -resource "aws_instance" "foo" { - num = "2" -} - -resource "do_droplet" "bar" {} diff --git a/terraform/testdata/plan-targeted-with-tainted/main.tf b/terraform/testdata/plan-targeted-with-tainted/main.tf deleted file mode 100644 index f17e08094..000000000 --- a/terraform/testdata/plan-targeted-with-tainted/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "ifailedprovisioners" { -} - -resource "aws_instance" "iambeingadded" { -} diff --git a/terraform/testdata/plan-variable-sensitivity-module/child/main.tf b/terraform/testdata/plan-variable-sensitivity-module/child/main.tf index 1e1a07b1d..e34751aa9 100644 --- a/terraform/testdata/plan-variable-sensitivity-module/child/main.tf +++ b/terraform/testdata/plan-variable-sensitivity-module/child/main.tf @@ -2,6 +2,12 @@ variable "foo" { type = string } -resource "aws_instance" "foo" { - foo = var.foo +// "bar" is defined as sensitive by both the parent and the child +variable "bar" { + sensitive = true +} + +resource "aws_instance" "foo" { + foo = var.foo + value = var.bar } diff --git a/terraform/testdata/plan-variable-sensitivity-module/main.tf b/terraform/testdata/plan-variable-sensitivity-module/main.tf index 28ac1dfb9..69bdbb4cb 100644 --- a/terraform/testdata/plan-variable-sensitivity-module/main.tf +++ b/terraform/testdata/plan-variable-sensitivity-module/main.tf @@ -3,7 
+3,12 @@ variable "sensitive_var" { sensitive = true } +variable "another_var" { + sensitive = true +} + module "child" { source = "./child" foo = var.sensitive_var + bar = var.another_var } diff --git a/terraform/testdata/refresh-config-orphan/main.tf b/terraform/testdata/refresh-config-orphan/main.tf deleted file mode 100644 index c1c8b23da..000000000 --- a/terraform/testdata/refresh-config-orphan/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "test_object" "foo" { - count = 3 -} diff --git a/terraform/testdata/refresh-data-scale-inout/main.tf b/terraform/testdata/refresh-data-scale-inout/main.tf deleted file mode 100644 index 480ba9483..000000000 --- a/terraform/testdata/refresh-data-scale-inout/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -data "aws_instance" "foo" { - count = 3 -} diff --git a/terraform/testdata/refresh-resource-scale-inout/main.tf b/terraform/testdata/refresh-resource-scale-inout/main.tf deleted file mode 100644 index acef373b3..000000000 --- a/terraform/testdata/refresh-resource-scale-inout/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - count = 3 -} diff --git a/terraform/testdata/state-module-orphans/bar/main.tf b/terraform/testdata/state-module-orphans/bar/main.tf deleted file mode 100644 index c01ade299..000000000 --- a/terraform/testdata/state-module-orphans/bar/main.tf +++ /dev/null @@ -1 +0,0 @@ -# Nothing diff --git a/terraform/testdata/state-module-orphans/main.tf b/terraform/testdata/state-module-orphans/main.tf deleted file mode 100644 index f009f1924..000000000 --- a/terraform/testdata/state-module-orphans/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -module "bar" { - source = "./bar" -} diff --git a/terraform/testdata/state-upgrade/v1-to-v2-empty-path.tfstate b/terraform/testdata/state-upgrade/v1-to-v2-empty-path.tfstate deleted file mode 100644 index ee7c9d187..000000000 --- a/terraform/testdata/state-upgrade/v1-to-v2-empty-path.tfstate +++ /dev/null @@ -1,38 +0,0 @@ -{ - "version": 1, - "modules": [{ - "resources": 
{ - "aws_instance.foo1": {"primary":{}}, - "cloudstack_instance.foo1": {"primary":{}}, - "cloudstack_instance.foo2": {"primary":{}}, - "digitalocean_droplet.foo1": {"primary":{}}, - "digitalocean_droplet.foo2": {"primary":{}}, - "digitalocean_droplet.foo3": {"primary":{}}, - "docker_container.foo1": {"primary":{}}, - "docker_container.foo2": {"primary":{}}, - "docker_container.foo3": {"primary":{}}, - "docker_container.foo4": {"primary":{}}, - "google_compute_instance.foo1": {"primary":{}}, - "google_compute_instance.foo2": {"primary":{}}, - "google_compute_instance.foo3": {"primary":{}}, - "google_compute_instance.foo4": {"primary":{}}, - "google_compute_instance.foo5": {"primary":{}}, - "heroku_app.foo1": {"primary":{}}, - "heroku_app.foo2": {"primary":{}}, - "heroku_app.foo3": {"primary":{}}, - "heroku_app.foo4": {"primary":{}}, - "heroku_app.foo5": {"primary":{}}, - "heroku_app.foo6": {"primary":{}}, - "openstack_compute_instance_v2.foo1": {"primary":{}}, - "openstack_compute_instance_v2.foo2": {"primary":{}}, - "openstack_compute_instance_v2.foo3": {"primary":{}}, - "openstack_compute_instance_v2.foo4": {"primary":{}}, - "openstack_compute_instance_v2.foo5": {"primary":{}}, - "openstack_compute_instance_v2.foo6": {"primary":{}}, - "openstack_compute_instance_v2.foo7": {"primary":{}}, - "bar": {"primary":{}}, - "baz": {"primary":{}}, - "zip": {"primary":{}} - } - }] -} diff --git a/terraform/testdata/static-validate-refs/static-validate-refs.tf b/terraform/testdata/static-validate-refs/static-validate-refs.tf index 9d945279c..e9f9344a8 100644 --- a/terraform/testdata/static-validate-refs/static-validate-refs.tf +++ b/terraform/testdata/static-validate-refs/static-validate-refs.tf @@ -1,6 +1,20 @@ +terraform { + required_providers { + boop = { + source = "foobar/beep" # intentional mismatch between local name and type + } + } +} + resource "aws_instance" "no_count" { } resource "aws_instance" "count" { count = 1 } + +resource "boop_instance" "yep" { +} + 
+resource "boop_whatever" "nope" { +} diff --git a/terraform/testdata/transform-create-before-destroy-basic/main.tf b/terraform/testdata/transform-create-before-destroy-basic/main.tf deleted file mode 100644 index 478c911c0..000000000 --- a/terraform/testdata/transform-create-before-destroy-basic/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "aws_instance" "web" { - lifecycle { - create_before_destroy = true - } -} - -resource "aws_load_balancer" "lb" { - member = "${aws_instance.web.id}" -} diff --git a/terraform/testdata/transform-create-before-destroy-twice/main.tf b/terraform/testdata/transform-create-before-destroy-twice/main.tf deleted file mode 100644 index c84a7a678..000000000 --- a/terraform/testdata/transform-create-before-destroy-twice/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "aws_lc" "foo" { - lifecycle { create_before_destroy = true } -} - -resource "aws_autoscale" "bar" { - lc = "${aws_lc.foo.id}" - - lifecycle { create_before_destroy = true } -} diff --git a/terraform/testdata/transform-destroy-basic/main.tf b/terraform/testdata/transform-destroy-basic/main.tf deleted file mode 100644 index 14bca3e82..000000000 --- a/terraform/testdata/transform-destroy-basic/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "foo" {} - -resource "aws_instance" "bar" { - value = "${aws_instance.foo.value}" -} diff --git a/terraform/testdata/transform-destroy-depends-on/main.tf b/terraform/testdata/transform-destroy-depends-on/main.tf deleted file mode 100644 index bb81ab869..000000000 --- a/terraform/testdata/transform-destroy-depends-on/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource "aws_instance" "foo" {} - -resource "aws_instance" "bar" { - depends_on = ["aws_instance.foo"] -} diff --git a/terraform/testdata/transform-destroy-deps/main.tf b/terraform/testdata/transform-destroy-deps/main.tf deleted file mode 100644 index 1419d893c..000000000 --- a/terraform/testdata/transform-destroy-deps/main.tf +++ /dev/null @@ -1,5 +0,0 @@ -resource 
"aws_lc" "foo" {} - -resource "aws_asg" "bar" { - lc = "${aws_lc.foo.id}" -} diff --git a/terraform/testdata/transform-destroy-edge-splat/main.tf b/terraform/testdata/transform-destroy-edge-splat/main.tf deleted file mode 100644 index 88d8b840b..000000000 --- a/terraform/testdata/transform-destroy-edge-splat/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -resource "test_object" "A" {} - -resource "test_object" "B" { - count = 2 - test_string = "${test_object.A.*.test_string}" -} diff --git a/terraform/testdata/transform-destroy-prefix/main.tf b/terraform/testdata/transform-destroy-prefix/main.tf deleted file mode 100644 index dd85754d4..000000000 --- a/terraform/testdata/transform-destroy-prefix/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" {} - -resource "aws_instance" "foo-bar" {} diff --git a/terraform/testdata/transform-destroy-prune-count/main.tf b/terraform/testdata/transform-destroy-prune-count/main.tf deleted file mode 100644 index 756ae10d5..000000000 --- a/terraform/testdata/transform-destroy-prune-count/main.tf +++ /dev/null @@ -1,6 +0,0 @@ -resource "aws_instance" "foo" {} - -resource "aws_instance" "bar" { - value = "${aws_instance.foo.value}" - count = "5" -} diff --git a/terraform/testdata/transform-diff-basic/main.tf b/terraform/testdata/transform-diff-basic/main.tf deleted file mode 100644 index 919f140bb..000000000 --- a/terraform/testdata/transform-diff-basic/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "foo" {} diff --git a/terraform/testdata/transform-flat-config-basic/child/main.tf b/terraform/testdata/transform-flat-config-basic/child/main.tf deleted file mode 100644 index 0c70b1b5d..000000000 --- a/terraform/testdata/transform-flat-config-basic/child/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "baz" {} diff --git a/terraform/testdata/transform-flat-config-basic/main.tf b/terraform/testdata/transform-flat-config-basic/main.tf deleted file mode 100644 index ffe0916f3..000000000 --- 
a/terraform/testdata/transform-flat-config-basic/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "aws_instance" "foo" {} - -resource "aws_instance" "bar" { - value = "${aws_instance.foo.value}" -} - -module "child" { - source = "./child" -} diff --git a/terraform/testdata/transform-flatten/child/main.tf b/terraform/testdata/transform-flatten/child/main.tf deleted file mode 100644 index 7371f826d..000000000 --- a/terraform/testdata/transform-flatten/child/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -variable "var" {} - -resource "aws_instance" "child" { - value = "${var.var}" -} - -output "output" { - value = "${aws_instance.child.value}" -} diff --git a/terraform/testdata/transform-flatten/main.tf b/terraform/testdata/transform-flatten/main.tf deleted file mode 100644 index 179e151a3..000000000 --- a/terraform/testdata/transform-flatten/main.tf +++ /dev/null @@ -1,12 +0,0 @@ -module "child" { - source = "./child" - var = "${aws_instance.parent.value}" -} - -resource "aws_instance" "parent" { - value = "foo" -} - -resource "aws_instance" "parent-output" { - value = "${module.child.output}" -} diff --git a/terraform/testdata/transform-orphan-output-basic/main.tf b/terraform/testdata/transform-orphan-output-basic/main.tf deleted file mode 100644 index 70619c4e3..000000000 --- a/terraform/testdata/transform-orphan-output-basic/main.tf +++ /dev/null @@ -1 +0,0 @@ -output "foo" { value = "bar" } diff --git a/terraform/testdata/transform-provider-disable-keep/child/main.tf b/terraform/testdata/transform-provider-disable-keep/child/main.tf deleted file mode 100644 index 9d02c162c..000000000 --- a/terraform/testdata/transform-provider-disable-keep/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "value" {} - -provider "aws" { - value = "${var.value}" -} - -resource "aws_instance" "foo" {} diff --git a/terraform/testdata/transform-provider-disable-keep/main.tf b/terraform/testdata/transform-provider-disable-keep/main.tf deleted file mode 100644 index 7f9aa3f9f..000000000 --- 
a/terraform/testdata/transform-provider-disable-keep/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -variable "foo" {} - -module "child" { - source = "./child" - - value = "${var.foo}" -} - -resource "aws_instance" "foo" {} diff --git a/terraform/testdata/transform-provider-disable/child/main.tf b/terraform/testdata/transform-provider-disable/child/main.tf deleted file mode 100644 index 9d02c162c..000000000 --- a/terraform/testdata/transform-provider-disable/child/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "value" {} - -provider "aws" { - value = "${var.value}" -} - -resource "aws_instance" "foo" {} diff --git a/terraform/testdata/transform-provider-disable/main.tf b/terraform/testdata/transform-provider-disable/main.tf deleted file mode 100644 index a405f9895..000000000 --- a/terraform/testdata/transform-provider-disable/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -variable "foo" {} - -module "child" { - source = "./child" - - value = "${var.foo}" -} diff --git a/terraform/testdata/transform-provider-implicit-module/main.tf b/terraform/testdata/transform-provider-implicit-module/main.tf deleted file mode 100644 index 141a04d3f..000000000 --- a/terraform/testdata/transform-provider-implicit-module/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -provider "aws" { - alias = "foo" -} - -module "mod" { - source = "./mod" - providers = { - aws = aws.foo - } -} diff --git a/terraform/testdata/transform-provider-implicit-module/mod/main.tf b/terraform/testdata/transform-provider-implicit-module/mod/main.tf deleted file mode 100644 index 01cf0803c..000000000 --- a/terraform/testdata/transform-provider-implicit-module/mod/main.tf +++ /dev/null @@ -1,2 +0,0 @@ -resource "aws_instance" "bar" { -} diff --git a/terraform/testdata/transform-provider-invalid/main.tf b/terraform/testdata/transform-provider-invalid/main.tf deleted file mode 100644 index ec23232ae..000000000 --- a/terraform/testdata/transform-provider-invalid/main.tf +++ /dev/null @@ -1,11 +0,0 @@ -provider "aws" { -} - -module "mod" 
{ - source = "./mod" - - # aws.foo doesn't exist, and should report an error - providers = { - aws = aws.foo - } -} diff --git a/terraform/testdata/transform-provider-invalid/mod/main.tf b/terraform/testdata/transform-provider-invalid/mod/main.tf deleted file mode 100644 index 03641197f..000000000 --- a/terraform/testdata/transform-provider-invalid/mod/main.tf +++ /dev/null @@ -1,2 +0,0 @@ -resource "aws_resource" "foo" { -} diff --git a/terraform/testdata/transform-provisioner-prune/main.tf b/terraform/testdata/transform-provisioner-prune/main.tf deleted file mode 100644 index c78a6ecac..000000000 --- a/terraform/testdata/transform-provisioner-prune/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "web" { - provisioner "foo" {} -} diff --git a/terraform/testdata/transform-resource-count-basic/main.tf b/terraform/testdata/transform-resource-count-basic/main.tf deleted file mode 100644 index 782a9142e..000000000 --- a/terraform/testdata/transform-resource-count-basic/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo" { - count = 3 -} diff --git a/terraform/testdata/transform-resource-count-deps/main.tf b/terraform/testdata/transform-resource-count-deps/main.tf deleted file mode 100644 index c6a683e6e..000000000 --- a/terraform/testdata/transform-resource-count-deps/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "aws_instance" "foo" { - count = 2 - - provisioner "local-exec" { - command = "echo ${aws_instance.foo.0.id}" - other = "echo ${aws_instance.foo.id}" - } -} diff --git a/terraform/testdata/transform-resource-count-negative/main.tf b/terraform/testdata/transform-resource-count-negative/main.tf deleted file mode 100644 index 267e20086..000000000 --- a/terraform/testdata/transform-resource-count-negative/main.tf +++ /dev/null @@ -1,4 +0,0 @@ -resource "aws_instance" "foo" { - count = -5 - value = "${aws_instance.foo.0.value}" -} diff --git a/terraform/testdata/transform-tainted-basic/main.tf 
b/terraform/testdata/transform-tainted-basic/main.tf deleted file mode 100644 index 64cbf6236..000000000 --- a/terraform/testdata/transform-tainted-basic/main.tf +++ /dev/null @@ -1 +0,0 @@ -resource "aws_instance" "web" {} diff --git a/terraform/testdata/transform-targets-destroy/main.tf b/terraform/testdata/transform-targets-destroy/main.tf deleted file mode 100644 index 5c1c285e5..000000000 --- a/terraform/testdata/transform-targets-destroy/main.tf +++ /dev/null @@ -1,26 +0,0 @@ -resource "aws_vpc" "notme" {} - -resource "aws_subnet" "notme" { - depends_on = [ - aws_vpc.notme, - ] -} - -resource "aws_instance" "me" { - depends_on = [ - aws_subnet.notme, - ] -} - -resource "aws_instance" "notme" {} -resource "aws_instance" "metoo" { - depends_on = [ - aws_instance.me, - ] -} - -resource "aws_elb" "me" { - depends_on = [ - aws_instance.me, - ] -} diff --git a/terraform/testdata/uservars-map/main.tf b/terraform/testdata/uservars-map/main.tf deleted file mode 100644 index ebe106343..000000000 --- a/terraform/testdata/uservars-map/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -variable "test_map" { - type = "map" -} diff --git a/terraform/testdata/validate-cycle/main.tf b/terraform/testdata/validate-cycle/main.tf deleted file mode 100644 index 3dc503aa7..000000000 --- a/terraform/testdata/validate-cycle/main.tf +++ /dev/null @@ -1,19 +0,0 @@ -provider "aws" { } - -/* - * When a CBD resource depends on a non-CBD resource, - * a cycle is formed that only shows up when Destroy - * nodes are included in the graph. 
- */ -resource "aws_security_group" "firewall" { -} - -resource "aws_instance" "web" { - security_groups = [ - "foo", - "${aws_security_group.firewall.foo}" - ] - lifecycle { - create_before_destroy = true - } -} diff --git a/terraform/testdata/validate-module-pc-inherit-orphan/main.tf b/terraform/testdata/validate-module-pc-inherit-orphan/main.tf deleted file mode 100644 index a5c34f64d..000000000 --- a/terraform/testdata/validate-module-pc-inherit-orphan/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -variable "foo" { - default = "bar" -} - -provider "aws" { - set = "${var.foo}" -} - -resource "aws_instance" "foo" {} diff --git a/terraform/testdata/validate-resource-name-symbol/main.tf b/terraform/testdata/validate-resource-name-symbol/main.tf deleted file mode 100644 index e89401f7c..000000000 --- a/terraform/testdata/validate-resource-name-symbol/main.tf +++ /dev/null @@ -1,3 +0,0 @@ -resource "aws_instance" "foo bar" { - num = "2" -} diff --git a/terraform/testdata/validate-bad-pc-empty/main.tf b/terraform/testdata/validate-skipped-pc-empty/main.tf similarity index 100% rename from terraform/testdata/validate-bad-pc-empty/main.tf rename to terraform/testdata/validate-skipped-pc-empty/main.tf diff --git a/terraform/testdata/validate-var-map-override-old/main.tf b/terraform/testdata/validate-var-map-override-old/main.tf deleted file mode 100644 index 7fe646c8b..000000000 --- a/terraform/testdata/validate-var-map-override-old/main.tf +++ /dev/null @@ -1 +0,0 @@ -variable "foo" { default = { foo = "bar" } } diff --git a/terraform/testdata/validate-variable-custom-validations-child-sensitive/child/child.tf b/terraform/testdata/validate-variable-custom-validations-child-sensitive/child/child.tf new file mode 100644 index 000000000..05027f75a --- /dev/null +++ b/terraform/testdata/validate-variable-custom-validations-child-sensitive/child/child.tf @@ -0,0 +1,8 @@ +variable "test" { + type = string + + validation { + condition = var.test != "nope" + error_message = "Value must 
not be \"nope\"." + } +} diff --git a/terraform/testdata/validate-variable-custom-validations-child-sensitive/validate-variable-custom-validations.tf b/terraform/testdata/validate-variable-custom-validations-child-sensitive/validate-variable-custom-validations.tf new file mode 100644 index 000000000..4f436db11 --- /dev/null +++ b/terraform/testdata/validate-variable-custom-validations-child-sensitive/validate-variable-custom-validations.tf @@ -0,0 +1,10 @@ +variable "test" { + sensitive = true + default = "nope" +} + +module "child" { + source = "./child" + + test = var.test +} diff --git a/terraform/transform.go b/terraform/transform.go index dc615f2b1..3b4121906 100644 --- a/terraform/transform.go +++ b/terraform/transform.go @@ -23,17 +23,6 @@ type GraphVertexTransformer interface { Transform(dag.Vertex) (dag.Vertex, error) } -// GraphTransformIf is a helper function that conditionally returns a -// GraphTransformer given. This is useful for calling inline a sequence -// of transforms without having to split it up into multiple append() calls. 
-func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer { - if f() { - return then - } - - return nil -} - type graphTransformerMulti struct { Transforms []GraphTransformer } diff --git a/terraform/transform_destroy_edge_test.go b/terraform/transform_destroy_edge_test.go index 107f26a43..4d0c63ebb 100644 --- a/terraform/transform_destroy_edge_test.go +++ b/terraform/transform_destroy_edge_test.go @@ -274,14 +274,6 @@ test_object.A (destroy) test_object.B (destroy) ` -const testTransformDestroyEdgeCreatorStr = ` -test_object.A - test_object.A (destroy) -test_object.A (destroy) - test_object.B (destroy) -test_object.B (destroy) -` - const testTransformDestroyEdgeMultiStr = ` test_object.A (destroy) test_object.B (destroy) diff --git a/terraform/transform_import_state.go b/terraform/transform_import_state.go index bbeba7aa7..966126217 100644 --- a/terraform/transform_import_state.go +++ b/terraform/transform_import_state.go @@ -22,13 +22,13 @@ func (t *ImportStateTransformer) Transform(g *Graph) error { // This is only likely to happen in misconfigured tests if t.Config == nil { - return fmt.Errorf("Cannot import into an empty configuration.") + return fmt.Errorf("cannot import into an empty configuration") } // Get the module config modCfg := t.Config.Descendent(target.Addr.Module.Module()) if modCfg == nil { - return fmt.Errorf("Module %s not found.", target.Addr.Module.Module()) + return fmt.Errorf("module %s not found", target.Addr.Module.Module()) } providerAddr := addrs.AbsProviderConfig{ @@ -116,25 +116,25 @@ func (n *graphNodeImportState) ModulePath() addrs.Module { } // GraphNodeExecutable impl. 
-func (n *graphNodeImportState) Execute(ctx EvalContext, op walkOperation) error { +func (n *graphNodeImportState) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { // Reset our states n.states = nil - provider, _, err := GetProvider(ctx, n.ResolvedProvider) - if err != nil { - return err + provider, _, err := getProvider(ctx, n.ResolvedProvider) + diags = diags.Append(err) + if diags.HasErrors() { + return diags } // import state absAddr := n.Addr.Resource.Absolute(ctx.Path()) - var diags tfdiags.Diagnostics // Call pre-import hook - err = ctx.Hook(func(h Hook) (HookAction, error) { + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { return h.PreImportState(absAddr, n.ID) - }) - if err != nil { - return err + })) + if diags.HasErrors() { + return diags } resp := provider.ImportResourceState(providers.ImportResourceStateRequest{ @@ -143,7 +143,7 @@ func (n *graphNodeImportState) Execute(ctx EvalContext, op walkOperation) error }) diags = diags.Append(resp.Diagnostics) if diags.HasErrors() { - return diags.Err() + return diags } imported := resp.ImportedResources @@ -153,10 +153,10 @@ func (n *graphNodeImportState) Execute(ctx EvalContext, op walkOperation) error n.states = imported // Call post-import hook - err = ctx.Hook(func(h Hook) (HookAction, error) { + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { return h.PostImportState(absAddr, imported) - }) - return err + })) + return diags } // GraphNodeDynamicExpandable impl. @@ -259,53 +259,28 @@ func (n *graphNodeImportStateSub) Path() addrs.ModuleInstance { } // GraphNodeExecutable impl. 
-func (n *graphNodeImportStateSub) Execute(ctx EvalContext, op walkOperation) error { +func (n *graphNodeImportStateSub) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { // If the Ephemeral type isn't set, then it is an error if n.State.TypeName == "" { - return fmt.Errorf("import of %s didn't set type", n.TargetAddr.String()) + diags = diags.Append(fmt.Errorf("import of %s didn't set type", n.TargetAddr.String())) + return diags } state := n.State.AsInstanceObject() - provider, providerSchema, err := GetProvider(ctx, n.ResolvedProvider) - if err != nil { - return err + + // Refresh + riNode := &NodeAbstractResourceInstance{ + Addr: n.TargetAddr, + NodeAbstractResource: NodeAbstractResource{ + ResolvedProvider: n.ResolvedProvider, + }, + } + state, refreshDiags := riNode.refresh(ctx, state) + diags = diags.Append(refreshDiags) + if diags.HasErrors() { + return diags } - // EvalRefresh - evalRefresh := &EvalRefresh{ - Addr: n.TargetAddr.Resource, - ProviderAddr: n.ResolvedProvider, - Provider: &provider, - ProviderSchema: &providerSchema, - State: &state, - Output: &state, - } - _, err = evalRefresh.Eval(ctx) - if err != nil { - return err - } - - // Verify the existance of the imported resource - if state.Value.IsNull() { - var diags tfdiags.Diagnostics - diags = diags.Append(tfdiags.Sourceless( - tfdiags.Error, - "Cannot import non-existent remote object", - fmt.Sprintf( - "While attempting to import an existing object to %s, the provider detected that no object exists with the given id. 
Only pre-existing objects can be imported; check that the id is correct and that it is associated with the provider's configured region or endpoint, or use \"terraform apply\" to create a new remote object for this resource.", - n.TargetAddr.Resource.String(), - ), - )) - return diags.Err() - } - - //EvalWriteState - evalWriteState := &EvalWriteState{ - Addr: n.TargetAddr.Resource, - ProviderAddr: n.ResolvedProvider, - ProviderSchema: &providerSchema, - State: &state, - } - _, err = evalWriteState.Eval(ctx) - return err + diags = diags.Append(riNode.writeResourceInstanceState(ctx, state, nil, workingState)) + return diags } diff --git a/terraform/transform_import_state_test.go b/terraform/transform_import_state_test.go index 9068a3654..84eb47f1b 100644 --- a/terraform/transform_import_state_test.go +++ b/terraform/transform_import_state_test.go @@ -14,12 +14,17 @@ import ( func TestGraphNodeImportStateExecute(t *testing.T) { state := states.NewState() provider := testProvider("aws") - provider.ImportStateReturn = []*InstanceState{ - &InstanceState{ - ID: "bar", - Ephemeral: EphemeralState{Type: "aws_instance"}, + provider.ImportResourceStateResponse = &providers.ImportResourceStateResponse{ + ImportedResources: []providers.ImportedResource{ + { + TypeName: "aws_instance", + State: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("bar"), + }), + }, }, } + ctx := &MockEvalContext{ StateState: state.SyncWrapper(), ProviderProvider: provider, @@ -41,9 +46,9 @@ func TestGraphNodeImportStateExecute(t *testing.T) { }, } - err := node.Execute(ctx, walkImport) - if err != nil { - t.Fatalf("Unexpected error: %s", err.Error()) + diags := node.Execute(ctx, walkImport) + if diags.HasErrors() { + t.Fatalf("Unexpected error: %s", diags.Err()) } if len(node.states) != 1 { @@ -93,9 +98,9 @@ func TestGraphNodeImportStateSubExecute(t *testing.T) { Module: addrs.RootModule, }, } - err := node.Execute(ctx, walkImport) - if err != nil { - t.Fatalf("Unexpected error: %s", 
err.Error()) + diags := node.Execute(ctx, walkImport) + if diags.HasErrors() { + t.Fatalf("Unexpected error: %s", diags.Err()) } // check for resource in state diff --git a/terraform/transform_orphan_count_test.go b/terraform/transform_orphan_count_test.go index ca823d8c5..1ec725cd6 100644 --- a/terraform/transform_orphan_count_test.go +++ b/terraform/transform_orphan_count_test.go @@ -1,60 +1,51 @@ package terraform -// FIXME: Update these tests for the new OrphanResourceCountTransformer -// interface that expects to be given a list of instance addresses that -// exist in config. - -/* import ( "strings" "testing" "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/states" - "github.com/zclconf/go-cty/cty" ) func TestOrphanResourceCountTransformer(t *testing.T) { - state := MustShimLegacyState(&State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "aws_instance.web": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "aws_instance.foo": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "aws_instance.foo.2": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, + state := states.NewState() + root := state.RootModule() + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.web").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), }, - }) + mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + 
mustResourceInstanceAddr("aws_instance.foo[2]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + ) g := Graph{Path: addrs.RootModuleInstance} { - tf := &OrphanResourceCountTransformer{ + tf := &OrphanResourceInstanceCountTransformer{ Concrete: testOrphanResourceConcreteFunc, - Count: 1, Addr: addrs.RootModuleInstance.Resource( addrs.ManagedResourceMode, "aws_instance", "foo", ), - State: state, + InstanceAddrs: []addrs.AbsResourceInstance{mustResourceInstanceAddr("aws_instance.foo[0]")}, + State: state, } if err := tf.Transform(&g); err != nil { t.Fatalf("err: %s", err) @@ -69,46 +60,43 @@ func TestOrphanResourceCountTransformer(t *testing.T) { } func TestOrphanResourceCountTransformer_zero(t *testing.T) { - state := MustShimLegacyState(&State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "aws_instance.web": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "aws_instance.foo": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "aws_instance.foo.2": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, + state := states.NewState() + root := state.RootModule() + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.web").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), }, - }) + mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + ) + 
root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[2]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + ) g := Graph{Path: addrs.RootModuleInstance} { - tf := &OrphanResourceCountTransformer{ + tf := &OrphanResourceInstanceCountTransformer{ Concrete: testOrphanResourceConcreteFunc, - Count: 0, Addr: addrs.RootModuleInstance.Resource( addrs.ManagedResourceMode, "aws_instance", "foo", ), - State: state, + InstanceAddrs: []addrs.AbsResourceInstance{}, + State: state, } if err := tf.Transform(&g); err != nil { t.Fatalf("err: %s", err) @@ -122,101 +110,44 @@ func TestOrphanResourceCountTransformer_zero(t *testing.T) { } } -func TestOrphanResourceCountTransformer_oneNoIndex(t *testing.T) { - state := MustShimLegacyState(&State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "aws_instance.web": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "aws_instance.foo": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "aws_instance.foo.2": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }) - - g := Graph{Path: addrs.RootModuleInstance} - - { - tf := &OrphanResourceCountTransformer{ - Concrete: testOrphanResourceConcreteFunc, - Count: 1, - Addr: addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - State: state, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformOrphanResourceCountOneNoIndexStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - func TestOrphanResourceCountTransformer_oneIndex(t *testing.T) { - state := 
MustShimLegacyState(&State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "aws_instance.web": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "aws_instance.foo.0": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "aws_instance.foo.1": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, + state := states.NewState() + root := state.RootModule() + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.web").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), }, - }) + mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[0]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + ) + root.SetResourceInstanceCurrent( + mustResourceInstanceAddr("aws_instance.foo[1]").Resource, + &states.ResourceInstanceObjectSrc{ + Status: states.ObjectReady, + AttrsJSON: []byte(`{"id":"foo"}`), + }, + mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), + ) g := Graph{Path: addrs.RootModuleInstance} { - tf := &OrphanResourceCountTransformer{ + tf := &OrphanResourceInstanceCountTransformer{ Concrete: testOrphanResourceConcreteFunc, - Count: 1, Addr: addrs.RootModuleInstance.Resource( addrs.ManagedResourceMode, "aws_instance", "foo", ), - State: state, + InstanceAddrs: []addrs.AbsResourceInstance{mustResourceInstanceAddr("aws_instance.foo[0]")}, + State: state, } if err := tf.Transform(&g); err != nil { t.Fatalf("err: %s", err) @@ -230,114 +161,6 @@ func TestOrphanResourceCountTransformer_oneIndex(t *testing.T) { } } -func 
TestOrphanResourceCountTransformer_zeroAndNone(t *testing.T) { - state := MustShimLegacyState(&State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "aws_instance.web": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "aws_instance.foo": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "aws_instance.foo.0": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }) - - g := Graph{Path: addrs.RootModuleInstance} - - { - tf := &OrphanResourceCountTransformer{ - Concrete: testOrphanResourceConcreteFunc, - Count: -1, - Addr: addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - State: state, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformOrphanResourceCountZeroAndNoneStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestOrphanResourceCountTransformer_zeroAndNoneCount(t *testing.T) { - state := MustShimLegacyState(&State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "aws_instance.web": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "aws_instance.foo": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "aws_instance.foo.0": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }) - - g := Graph{Path: addrs.RootModuleInstance} - - { - tf := &OrphanResourceCountTransformer{ - Concrete: testOrphanResourceConcreteFunc, - Count: 2, - Addr: addrs.RootModuleInstance.Resource( - addrs.ManagedResourceMode, "aws_instance", "foo", - ), - State: 
state, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformOrphanResourceCountZeroAndNoneCountStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - // When converting from a NoEach mode to an EachMap via a switch to for_each, // an edge is necessary to ensure that the map-key'd instances // are evaluated after the NoKey resource, because the final instance evaluated @@ -357,10 +180,7 @@ func TestOrphanResourceCountTransformer_ForEachEdgesAdded(t *testing.T) { }, Status: states.ObjectReady, }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModuleInstance, - }, + mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), ) // NoKey'd resource @@ -376,25 +196,20 @@ func TestOrphanResourceCountTransformer_ForEachEdgesAdded(t *testing.T) { }, Status: states.ObjectReady, }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModuleInstance, - }, + mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`), ) }) g := Graph{Path: addrs.RootModuleInstance} { - tf := &OrphanResourceCountTransformer{ + tf := &OrphanResourceInstanceCountTransformer{ Concrete: testOrphanResourceConcreteFunc, - // No keys in this ForEach ensure both our resources end - // up orphaned in this test - ForEach: map[string]cty.Value{}, Addr: addrs.RootModuleInstance.Resource( addrs.ManagedResourceMode, "aws_instance", "foo", ), - State: state, + InstanceAddrs: []addrs.AbsResourceInstance{}, + State: state, } if err := tf.Transform(&g); err != nil { t.Fatalf("err: %s", err) @@ -413,11 +228,7 @@ aws_instance.foo[2] (orphan) ` const testTransformOrphanResourceCountZeroStr = ` -aws_instance.foo (orphan) -aws_instance.foo[2] (orphan) -` - -const testTransformOrphanResourceCountOneNoIndexStr = ` +aws_instance.foo[0] (orphan) aws_instance.foo[2] (orphan) ` @@ 
-425,17 +236,7 @@ const testTransformOrphanResourceCountOneIndexStr = ` aws_instance.foo[1] (orphan) ` -const testTransformOrphanResourceCountZeroAndNoneStr = ` -aws_instance.foo[0] (orphan) -` - -const testTransformOrphanResourceCountZeroAndNoneCountStr = ` -aws_instance.foo (orphan) -` - const testTransformOrphanResourceForEachStr = ` aws_instance.foo (orphan) aws_instance.foo["bar"] (orphan) - aws_instance.foo (orphan) ` -*/ diff --git a/terraform/transform_provider.go b/terraform/transform_provider.go index d3b2248de..f8f390b0a 100644 --- a/terraform/transform_provider.go +++ b/terraform/transform_provider.go @@ -67,6 +67,7 @@ type GraphNodeProviderConsumer interface { // ProvidedBy returns the address of the provider configuration the node // refers to, if available. The following value types may be returned: // + // nil + exact true: the node does not require a provider // * addrs.LocalProviderConfig: the provider was set in the resource config // * addrs.AbsProviderConfig + exact true: the provider configuration was // taken from the instance state. @@ -111,9 +112,14 @@ func (t *ProviderTransformer) Transform(g *Graph) error { for _, v := range g.Vertices() { // Does the vertex _directly_ use a provider? 
if pv, ok := v.(GraphNodeProviderConsumer); ok { + providerAddr, exact := pv.ProvidedBy() + if providerAddr == nil && exact { + // no provider is required + continue + } + requested[v] = make(map[string]ProviderRequest) - providerAddr, exact := pv.ProvidedBy() var absPc addrs.AbsProviderConfig switch p := providerAddr.(type) { @@ -225,7 +231,6 @@ func (t *ProviderTransformer) Transform(g *Graph) error { if p, ok := target.(*graphNodeProxyProvider); ok { g.Remove(p) target = p.Target() - key = target.(GraphNodeProvider).ProviderAddr().String() } log.Printf("[DEBUG] ProviderTransformer: %q (%T) needs %s", dag.VertexName(v), v, dag.VertexName(target)) @@ -250,8 +255,7 @@ func (t *CloseProviderTransformer) Transform(g *Graph) error { cpm := make(map[string]*graphNodeCloseProvider) var err error - for _, v := range pm { - p := v.(GraphNodeProvider) + for _, p := range pm { key := p.ProviderAddr().String() // get the close provider of this type if we alread created it @@ -431,24 +435,13 @@ func providerVertexMap(g *Graph) map[string]GraphNodeProvider { return m } -func closeProviderVertexMap(g *Graph) map[string]GraphNodeCloseProvider { - m := make(map[string]GraphNodeCloseProvider) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeCloseProvider); ok { - addr := pv.CloseProviderAddr() - m[addr.String()] = pv - } - } - - return m -} - type graphNodeCloseProvider struct { Addr addrs.AbsProviderConfig } var ( _ GraphNodeCloseProvider = (*graphNodeCloseProvider)(nil) + _ GraphNodeExecutable = (*graphNodeCloseProvider)(nil) ) func (n *graphNodeCloseProvider) Name() string { @@ -461,13 +454,8 @@ func (n *graphNodeCloseProvider) ModulePath() addrs.Module { } // GraphNodeExecutable impl. -func (n *graphNodeCloseProvider) Execute(ctx EvalContext, op walkOperation) error { - return ctx.CloseProvider(n.Addr) -} - -// GraphNodeDependable impl. 
-func (n *graphNodeCloseProvider) DependableName() []string { - return []string{n.Name()} +func (n *graphNodeCloseProvider) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) { + return diags.Append(ctx.CloseProvider(n.Addr)) } func (n *graphNodeCloseProvider) CloseProviderAddr() addrs.AbsProviderConfig { @@ -617,6 +605,43 @@ func (t *ProviderConfigTransformer) transformSingle(g *Graph, c *configs.Config) t.proxiable[key] = !diags.HasErrors() } + if mod.ProviderRequirements != nil { + // Add implied provider configs from the required_providers + // Since we're still treating empty configs as proxies, we can just add + // these as empty configs too. We'll ensure that these are given a + // configuration during validation to prevent them from becoming + // fully-fledged config instances. + for _, p := range mod.ProviderRequirements.RequiredProviders { + for _, aliasAddr := range p.Aliases { + addr := addrs.AbsProviderConfig{ + Provider: mod.ProviderForLocalConfig(aliasAddr), + Module: path, + Alias: aliasAddr.Alias, + } + + key := addr.String() + if _, ok := t.providers[key]; ok { + continue + } + + abstract := &NodeAbstractProvider{ + Addr: addr, + } + var v dag.Vertex + if t.Concrete != nil { + v = t.Concrete(abstract) + } else { + v = abstract + } + + // Add it to the graph + g.Add(v) + t.providers[key] = v.(GraphNodeProvider) + t.proxiable[key] = true + } + } + } + // Now replace the provider nodes with proxy nodes if a provider was being // passed in, and create implicit proxies if there was no config. Any extra // proxies will be removed in the prune step. 
diff --git a/terraform/transform_provider_test.go b/terraform/transform_provider_test.go index 01ff63a0a..fbc9a4187 100644 --- a/terraform/transform_provider_test.go +++ b/terraform/transform_provider_test.go @@ -434,69 +434,6 @@ func TestProviderConfigTransformer_grandparentProviders(t *testing.T) { } } -// pass a specific provider into a module using it implicitly -func TestProviderConfigTransformer_implicitModule(t *testing.T) { - mod := testModule(t, "transform-provider-implicit-module") - concrete := func(a *NodeAbstractProvider) dag.Vertex { return a } - - g := Graph{Path: addrs.RootModuleInstance} - { - tf := &ConfigTransformer{Config: mod} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - { - tf := &AttachResourceConfigTransformer{Config: mod} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - { - tf := TransformProviders([]string{"aws"}, concrete, mod) - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(`module.mod.aws_instance.bar - provider["registry.terraform.io/hashicorp/aws"].foo -provider["registry.terraform.io/hashicorp/aws"].foo`) - if actual != expected { - t.Fatalf("wrong result\n\nexpected:\n%s\n\ngot:\n%s", expected, actual) - } -} - -// error out when a non-existent provider is named in a module providers map -func TestProviderConfigTransformer_invalidProvider(t *testing.T) { - mod := testModule(t, "transform-provider-invalid") - concrete := func(a *NodeAbstractProvider) dag.Vertex { return a } - - g := Graph{Path: addrs.RootModuleInstance} - { - tf := &ConfigTransformer{Config: mod} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - { - tf := &AttachResourceConfigTransformer{Config: mod} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - tf := TransformProviders([]string{"aws"}, concrete, mod) - err := tf.Transform(&g) - if 
err == nil { - t.Fatal("expected missing provider error") - } - if !strings.Contains(err.Error(), `provider["registry.terraform.io/hashicorp/aws"].foo`) { - t.Fatalf("error should reference missing provider, got: %s", err) - } -} - const testTransformProviderBasicStr = ` aws_instance.web provider["registry.terraform.io/hashicorp/aws"] @@ -545,31 +482,6 @@ provider["registry.terraform.io/hashicorp/foo"] (close) provider["registry.terraform.io/hashicorp/foo"] ` -const testTransformDisableProviderBasicStr = ` -module.child - provider["registry.terraform.io/hashicorp/aws"] (disabled) - var.foo -provider["registry.terraform.io/hashicorp/aws"] (close) - module.child - provider["registry.terraform.io/hashicorp/aws"] (disabled) -provider["registry.terraform.io/hashicorp/aws"] (disabled) -var.foo -` - -const testTransformDisableProviderKeepStr = ` -aws_instance.foo - provider["registry.terraform.io/hashicorp/aws"] -module.child - provider["registry.terraform.io/hashicorp/aws"] - var.foo -provider["registry.terraform.io/hashicorp/aws"] -provider["registry.terraform.io/hashicorp/aws"] (close) - aws_instance.foo - module.child - provider["registry.terraform.io/hashicorp/aws"] -var.foo -` - const testTransformModuleProviderConfigStr = ` module.child.aws_instance.thing provider["registry.terraform.io/hashicorp/aws"].foo diff --git a/terraform/transform_provisioner.go b/terraform/transform_provisioner.go index 638410d4a..38e3a8ed7 100644 --- a/terraform/transform_provisioner.go +++ b/terraform/transform_provisioner.go @@ -1,179 +1,8 @@ package terraform -import ( - "fmt" - "log" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeProvisioner is an interface that nodes that can be a provisioner -// must implement. The ProvisionerName returned is the name of the provisioner -// they satisfy. 
-type GraphNodeProvisioner interface { - ProvisionerName() string -} - -// GraphNodeCloseProvisioner is an interface that nodes that can be a close -// provisioner must implement. The CloseProvisionerName returned is the name -// of the provisioner they satisfy. -type GraphNodeCloseProvisioner interface { - CloseProvisionerName() string -} - // GraphNodeProvisionerConsumer is an interface that nodes that require // a provisioner must implement. ProvisionedBy must return the names of the // provisioners to use. type GraphNodeProvisionerConsumer interface { ProvisionedBy() []string } - -// ProvisionerTransformer is a GraphTransformer that maps resources to -// provisioners within the graph. This will error if there are any resources -// that don't map to proper resources. -type ProvisionerTransformer struct{} - -func (t *ProvisionerTransformer) Transform(g *Graph) error { - // Go through the other nodes and match them to provisioners they need - var err error - m := provisionerVertexMap(g) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisionerConsumer); ok { - for _, p := range pv.ProvisionedBy() { - if m[p] == nil { - err = multierror.Append(err, fmt.Errorf( - "%s: provisioner %s couldn't be found", - dag.VertexName(v), p)) - continue - } - - log.Printf("[TRACE] ProvisionerTransformer: %s is provisioned by %s (%q)", dag.VertexName(v), p, dag.VertexName(m[p])) - g.Connect(dag.BasicEdge(v, m[p])) - } - } - } - - return err -} - -// MissingProvisionerTransformer is a GraphTransformer that adds nodes -// for missing provisioners into the graph. -type MissingProvisionerTransformer struct { - // Provisioners is the list of provisioners we support. 
- Provisioners []string -} - -func (t *MissingProvisionerTransformer) Transform(g *Graph) error { - // Create a set of our supported provisioners - supported := make(map[string]struct{}, len(t.Provisioners)) - for _, v := range t.Provisioners { - supported[v] = struct{}{} - } - - // Get the map of provisioners we already have in our graph - m := provisionerVertexMap(g) - - // Go through all the provisioner consumers and make sure we add - // that provisioner if it is missing. - for _, v := range g.Vertices() { - pv, ok := v.(GraphNodeProvisionerConsumer) - if !ok { - continue - } - - for _, p := range pv.ProvisionedBy() { - if _, ok := m[p]; ok { - // This provisioner already exists as a configure node - continue - } - - if _, ok := supported[p]; !ok { - // If we don't support the provisioner type, we skip it. - // Validation later will catch this as an error. - continue - } - - // Build the vertex - var newV dag.Vertex = &NodeProvisioner{ - NameValue: p, - } - - // Add the missing provisioner node to the graph - m[p] = g.Add(newV) - log.Printf("[TRACE] MissingProviderTransformer: added implicit provisioner %s, first implied by %s", p, dag.VertexName(v)) - } - } - - return nil -} - -// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the -// graph that will close open provisioner connections that aren't needed -// anymore. A provisioner connection is not needed anymore once all depended -// resources in the graph are evaluated. 
-type CloseProvisionerTransformer struct{} - -func (t *CloseProvisionerTransformer) Transform(g *Graph) error { - m := closeProvisionerVertexMap(g) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisionerConsumer); ok { - for _, p := range pv.ProvisionedBy() { - source := m[p] - - if source == nil { - // Create a new graphNodeCloseProvisioner and add it to the graph - source = &graphNodeCloseProvisioner{ProvisionerNameValue: p} - g.Add(source) - - // Make sure we also add the new graphNodeCloseProvisioner to the map - // so we don't create and add any duplicate graphNodeCloseProvisioners. - m[p] = source - } - - g.Connect(dag.BasicEdge(source, v)) - } - } - } - - return nil -} - -func provisionerVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProvisioner); ok { - m[pv.ProvisionerName()] = v - } - } - - return m -} - -func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeCloseProvisioner); ok { - m[pv.CloseProvisionerName()] = v - } - } - - return m -} - -type graphNodeCloseProvisioner struct { - ProvisionerNameValue string -} - -func (n *graphNodeCloseProvisioner) Name() string { - return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue) -} - -// GraphNodeExecutable impl. 
-func (n *graphNodeCloseProvisioner) Execute(ctx EvalContext, op walkOperation) error { - return ctx.CloseProvisioner(n.ProvisionerNameValue) -} - -func (n *graphNodeCloseProvisioner) CloseProvisionerName() string { - return n.ProvisionerNameValue -} diff --git a/terraform/transform_provisioner_test.go b/terraform/transform_provisioner_test.go deleted file mode 100644 index a6da10afc..000000000 --- a/terraform/transform_provisioner_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package terraform - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform/addrs" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/states" -) - -func TestMissingProvisionerTransformer(t *testing.T) { - mod := testModule(t, "transform-provisioner-basic") - - g := Graph{Path: addrs.RootModuleInstance} - { - tf := &ConfigTransformer{Config: mod} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &AttachResourceConfigTransformer{Config: mod} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &MissingProvisionerTransformer{Provisioners: []string{"shell"}} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &ProvisionerTransformer{} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformMissingProvisionerBasicStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -func TestMissingProvisionerTransformer_module(t *testing.T) { - mod := testModule(t, "transform-provisioner-module") - - g := Graph{Path: addrs.RootModuleInstance} - { - concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { - return a - } - - state := states.BuildState(func(s *states.SyncState) { - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: 
"aws_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "id": "foo", - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - s.SetResourceInstanceCurrent( - addrs.Resource{ - Mode: addrs.ManagedResourceMode, - Type: "aws_instance", - Name: "foo", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance.Child("child", addrs.NoKey)), - &states.ResourceInstanceObjectSrc{ - AttrsFlat: map[string]string{ - "id": "foo", - }, - Status: states.ObjectReady, - }, - addrs.AbsProviderConfig{ - Provider: addrs.NewDefaultProvider("aws"), - Module: addrs.RootModule, - }, - ) - }) - - tf := &StateTransformer{ - ConcreteCurrent: concreteResource, - State: state, - } - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - t.Logf("graph after StateTransformer:\n%s", g.StringWithNodeTypes()) - } - - { - transform := &AttachResourceConfigTransformer{Config: mod} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &MissingProvisionerTransformer{Provisioners: []string{"shell"}} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - t.Logf("graph after MissingProvisionerTransformer:\n%s", g.StringWithNodeTypes()) - } - - { - transform := &ProvisionerTransformer{} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - t.Logf("graph after ProvisionerTransformer:\n%s", g.StringWithNodeTypes()) - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformMissingProvisionerModuleStr) - if actual != expected { - t.Fatalf("wrong result\n\ngot:\n%s\n\nwant:\n%s", actual, expected) - } -} - -func TestCloseProvisionerTransformer(t *testing.T) { - mod := testModule(t, "transform-provisioner-basic") - - g := Graph{Path: 
addrs.RootModuleInstance} - { - tf := &ConfigTransformer{Config: mod} - if err := tf.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &AttachResourceConfigTransformer{Config: mod} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &MissingProvisionerTransformer{Provisioners: []string{"shell"}} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &ProvisionerTransformer{} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - { - transform := &CloseProvisionerTransformer{} - if err := transform.Transform(&g); err != nil { - t.Fatalf("err: %s", err) - } - } - - actual := strings.TrimSpace(g.String()) - expected := strings.TrimSpace(testTransformCloseProvisionerBasicStr) - if actual != expected { - t.Fatalf("bad:\n\n%s", actual) - } -} - -const testTransformMissingProvisionerBasicStr = ` -aws_instance.web - provisioner.shell -provisioner.shell -` - -const testTransformMissingProvisionerModuleStr = ` -aws_instance.foo - provisioner.shell -module.child.aws_instance.foo - provisioner.shell -provisioner.shell -` - -const testTransformCloseProvisionerBasicStr = ` -aws_instance.web - provisioner.shell -provisioner.shell -provisioner.shell (close) - aws_instance.web -` diff --git a/terraform/transform_reference.go b/terraform/transform_reference.go index 0b7d0e566..3760b6c3a 100644 --- a/terraform/transform_reference.go +++ b/terraform/transform_reference.go @@ -538,17 +538,3 @@ func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Re refs, _ := lang.ReferencesInBlock(body, schema) return refs } - -func modulePrefixStr(p addrs.ModuleInstance) string { - return p.String() -} - -func modulePrefixList(result []string, prefix string) []string { - if prefix != "" { - for i, v := range result { - result[i] = fmt.Sprintf("%s.%s", prefix, v) - } - } - - return result -} diff --git 
a/terraform/transform_reference_test.go b/terraform/transform_reference_test.go index 069645e9e..e6bf1940b 100644 --- a/terraform/transform_reference_test.go +++ b/terraform/transform_reference_test.go @@ -68,12 +68,12 @@ func TestReferenceTransformer_path(t *testing.T) { }) g.Add(&graphNodeRefParentTest{ NameValue: "child.A", - PathValue: []string{"root", "child"}, + PathValue: addrs.ModuleInstance{addrs.ModuleInstanceStep{Name: "child"}}, Names: []string{"A"}, }) g.Add(&graphNodeRefChildTest{ NameValue: "child.B", - PathValue: []string{"root", "child"}, + PathValue: addrs.ModuleInstance{addrs.ModuleInstanceStep{Name: "child"}}, Refs: []string{"A"}, }) @@ -214,7 +214,7 @@ func TestReferenceMapReferences(t *testing.T) { type graphNodeRefParentTest struct { NameValue string - PathValue []string + PathValue addrs.ModuleInstance Names []string } @@ -233,16 +233,16 @@ func (n *graphNodeRefParentTest) ReferenceableAddrs() []addrs.Referenceable { } func (n *graphNodeRefParentTest) Path() addrs.ModuleInstance { - return normalizeModulePath(n.PathValue) + return n.PathValue } func (n *graphNodeRefParentTest) ModulePath() addrs.Module { - return normalizeModulePath(n.PathValue).Module() + return n.PathValue.Module() } type graphNodeRefChildTest struct { NameValue string - PathValue []string + PathValue addrs.ModuleInstance Refs []string } @@ -263,11 +263,11 @@ func (n *graphNodeRefChildTest) References() []*addrs.Reference { } func (n *graphNodeRefChildTest) Path() addrs.ModuleInstance { - return normalizeModulePath(n.PathValue) + return n.PathValue } func (n *graphNodeRefChildTest) ModulePath() addrs.Module { - return normalizeModulePath(n.PathValue).Module() + return n.PathValue.Module() } type graphNodeFakeResourceInstance struct { @@ -309,25 +309,6 @@ B A ` -const testTransformRefBackupStr = ` -A -B - A -` - -const testTransformRefBackupPrimaryStr = ` -A -B - C -C -` - -const testTransformRefModulePathStr = ` -A -B - A -` - const testTransformRefPathStr = ` A B diff 
--git a/terraform/transform_vertex.go b/terraform/transform_vertex.go index 6b1293fc2..9620e6eb8 100644 --- a/terraform/transform_vertex.go +++ b/terraform/transform_vertex.go @@ -31,7 +31,7 @@ func (t *VertexTransformer) Transform(g *Graph) error { if ok := g.Replace(v, newV); !ok { // This should never happen, big problem return fmt.Errorf( - "Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v", + "failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v", dag.VertexName(v), dag.VertexName(newV), v, newV) } diff --git a/terraform/update_state_hook.go b/terraform/update_state_hook.go new file mode 100644 index 000000000..c2ed76e8e --- /dev/null +++ b/terraform/update_state_hook.go @@ -0,0 +1,19 @@ +package terraform + +// updateStateHook calls the PostStateUpdate hook with the current state. +func updateStateHook(ctx EvalContext) error { + // In principle we could grab the lock here just long enough to take a + // deep copy and then pass that to our hooks below, but we'll instead + // hold the hook for the duration to avoid the potential confusing + // situation of us racing to call PostStateUpdate concurrently with + // different state snapshots. 
+ stateSync := ctx.State() + state := stateSync.Lock().DeepCopy() + defer stateSync.Unlock() + + // Call the hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostStateUpdate(state) + }) + return err +} diff --git a/terraform/update_state_hook_test.go b/terraform/update_state_hook_test.go new file mode 100644 index 000000000..71735627c --- /dev/null +++ b/terraform/update_state_hook_test.go @@ -0,0 +1,33 @@ +package terraform + +import ( + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" +) + +func TestUpdateStateHook(t *testing.T) { + mockHook := new(MockHook) + + state := states.NewState() + state.Module(addrs.RootModuleInstance).SetLocalValue("foo", cty.StringVal("hello")) + + ctx := new(MockEvalContext) + ctx.HookHook = mockHook + ctx.StateState = state.SyncWrapper() + + if err := updateStateHook(ctx); err != nil { + t.Fatalf("err: %s", err) + } + + if !mockHook.PostStateUpdateCalled { + t.Fatal("should call PostStateUpdate") + } + if mockHook.PostStateUpdateState.LocalValue(addrs.LocalValue{Name: "foo"}.Absolute(addrs.RootModuleInstance)) != cty.StringVal("hello") { + t.Fatalf("wrong state passed to hook: %s", spew.Sdump(mockHook.PostStateUpdateState)) + } +} diff --git a/terraform/eval_state_upgrade.go b/terraform/upgrade_resource_state.go similarity index 95% rename from terraform/eval_state_upgrade.go rename to terraform/upgrade_resource_state.go index e03349c37..924b79ccc 100644 --- a/terraform/eval_state_upgrade.go +++ b/terraform/upgrade_resource_state.go @@ -13,14 +13,14 @@ import ( "github.com/zclconf/go-cty/cty" ) -// UpgradeResourceState will, if necessary, run the provider-defined upgrade +// upgradeResourceState will, if necessary, run the provider-defined upgrade // logic against the given state object to make it compliant with the // current schema version. 
This is a no-op if the given state object is // already at the latest version. // // If any errors occur during upgrade, error diagnostics are returned. In that // case it is not safe to proceed with using the original state object. -func UpgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Interface, src *states.ResourceInstanceObjectSrc, currentSchema *configschema.Block, currentVersion uint64) (*states.ResourceInstanceObjectSrc, tfdiags.Diagnostics) { +func upgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Interface, src *states.ResourceInstanceObjectSrc, currentSchema *configschema.Block, currentVersion uint64) (*states.ResourceInstanceObjectSrc, tfdiags.Diagnostics) { // Remove any attributes from state that are not present in the schema. // This was previously taken care of by the provider, but data sources do // not go through the UpgradeResourceState process. @@ -42,7 +42,7 @@ func UpgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Int // TODO: This should eventually use a proper FQN. providerType := addr.Resource.Resource.ImpliedProvider() if src.SchemaVersion > currentVersion { - log.Printf("[TRACE] UpgradeResourceState: can't downgrade state for %s from version %d to %d", addr, src.SchemaVersion, currentVersion) + log.Printf("[TRACE] upgradeResourceState: can't downgrade state for %s from version %d to %d", addr, src.SchemaVersion, currentVersion) var diags tfdiags.Diagnostics diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, @@ -62,9 +62,9 @@ func UpgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Int // representation, since only the provider has enough information to // understand a flatmap built against an older schema. 
if src.SchemaVersion != currentVersion { - log.Printf("[TRACE] UpgradeResourceState: upgrading state for %s from version %d to %d using provider %q", addr, src.SchemaVersion, currentVersion, providerType) + log.Printf("[TRACE] upgradeResourceState: upgrading state for %s from version %d to %d using provider %q", addr, src.SchemaVersion, currentVersion, providerType) } else { - log.Printf("[TRACE] UpgradeResourceState: schema version of %s is still %d; calling provider %q for any other minor fixups", addr, currentVersion, providerType) + log.Printf("[TRACE] upgradeResourceState: schema version of %s is still %d; calling provider %q for any other minor fixups", addr, currentVersion, providerType) } req := providers.UpgradeResourceStateRequest{ diff --git a/terraform/eval_state_upgrade_test.go b/terraform/upgrade_resource_state_test.go similarity index 100% rename from terraform/eval_state_upgrade_test.go rename to terraform/upgrade_resource_state_test.go diff --git a/terraform/eval_validate_selfref.go b/terraform/validate_selfref.go similarity index 58% rename from terraform/eval_validate_selfref.go rename to terraform/validate_selfref.go index dd5e4018d..d00b1975b 100644 --- a/terraform/eval_validate_selfref.go +++ b/terraform/validate_selfref.go @@ -11,18 +11,10 @@ import ( "github.com/hashicorp/terraform/tfdiags" ) -// EvalValidateSelfRef is an EvalNode implementation that checks to ensure that -// expressions within a particular referencable block do not reference that -// same block. -type EvalValidateSelfRef struct { - Addr addrs.Referenceable - Config hcl.Body - ProviderSchema **ProviderSchema -} - -func (n *EvalValidateSelfRef) Eval(ctx EvalContext) (interface{}, error) { +// validateSelfRef checks to ensure that expressions within a particular +// referencable block do not reference that same block. 
+func validateSelfRef(addr addrs.Referenceable, config hcl.Body, providerSchema *ProviderSchema) tfdiags.Diagnostics { var diags tfdiags.Diagnostics - addr := n.Addr addrStrs := make([]string, 0, 1) addrStrs = append(addrStrs, addr.String()) @@ -32,11 +24,11 @@ func (n *EvalValidateSelfRef) Eval(ctx EvalContext) (interface{}, error) { addrStrs = append(addrStrs, tAddr.ContainingResource().String()) } - if n.ProviderSchema == nil || *n.ProviderSchema == nil { - return nil, fmt.Errorf("provider schema unavailable while validating %s for self-references; this is a bug in Terraform and should be reported", addr) + if providerSchema == nil { + diags = diags.Append(fmt.Errorf("provider schema unavailable while validating %s for self-references; this is a bug in Terraform and should be reported", addr)) + return diags } - providerSchema := *n.ProviderSchema var schema *configschema.Block switch tAddr := addr.(type) { case addrs.Resource: @@ -46,10 +38,11 @@ func (n *EvalValidateSelfRef) Eval(ctx EvalContext) (interface{}, error) { } if schema == nil { - return nil, fmt.Errorf("no schema available for %s to validate for self-references; this is a bug in Terraform and should be reported", addr) + diags = diags.Append(fmt.Errorf("no schema available for %s to validate for self-references; this is a bug in Terraform and should be reported", addr)) + return diags } - refs, _ := lang.ReferencesInBlock(n.Config, schema) + refs, _ := lang.ReferencesInBlock(config, schema) for _, ref := range refs { for _, addrStr := range addrStrs { if ref.Subject.String() == addrStr { @@ -63,5 +56,5 @@ func (n *EvalValidateSelfRef) Eval(ctx EvalContext) (interface{}, error) { } } - return nil, diags.NonFatalErr() + return diags } diff --git a/terraform/eval_validate_selfref_test.go b/terraform/validate_selfref_test.go similarity index 84% rename from terraform/eval_validate_selfref_test.go rename to terraform/validate_selfref_test.go index de3b9a34b..8359a947b 100644 --- 
a/terraform/eval_validate_selfref_test.go +++ b/terraform/validate_selfref_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/tfdiags" "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hcltest" @@ -13,7 +12,7 @@ import ( "github.com/zclconf/go-cty/cty" ) -func TestEvalValidateSelfRef(t *testing.T) { +func TestValidateSelfRef(t *testing.T) { rAddr := addrs.Resource{ Mode: addrs.ManagedResourceMode, Type: "aws_instance", @@ -93,16 +92,7 @@ func TestEvalValidateSelfRef(t *testing.T) { }, } - n := &EvalValidateSelfRef{ - Addr: test.Addr, - Config: body, - ProviderSchema: &ps, - } - result, err := n.Eval(nil) - if result != nil { - t.Fatal("result should always be nil") - } - diags := tfdiags.Diagnostics(nil).Append(err) + diags := validateSelfRef(test.Addr, body, ps) if diags.HasErrors() != test.Err { if test.Err { t.Errorf("unexpected success; want error") diff --git a/tfdiags/contextual.go b/tfdiags/contextual.go index d55bc2f0c..f66e5d913 100644 --- a/tfdiags/contextual.go +++ b/tfdiags/contextual.go @@ -29,13 +29,13 @@ type contextualFromConfigBody interface { // InConfigBody returns a copy of the receiver with any config-contextual // diagnostics elaborated in the context of the given body. 
-func (d Diagnostics) InConfigBody(body hcl.Body) Diagnostics { - if len(d) == 0 { +func (diags Diagnostics) InConfigBody(body hcl.Body) Diagnostics { + if len(diags) == 0 { return nil } - ret := make(Diagnostics, len(d)) - for i, srcDiag := range d { + ret := make(Diagnostics, len(diags)) + for i, srcDiag := range diags { if cd, isCD := srcDiag.(contextualFromConfigBody); isCD { ret[i] = cd.ElaborateFromConfigBody(body) } else { diff --git a/tfdiags/hcl.go b/tfdiags/hcl.go index 37fb0d1ae..66e3e4258 100644 --- a/tfdiags/hcl.go +++ b/tfdiags/hcl.go @@ -98,12 +98,12 @@ func (r SourceRange) ToHCL() hcl.Range { // problem, but could produce an awkward result in some special cases such // as converting the result of ConsolidateWarnings, which will force the // resulting warning groups to be flattened early. -func (d Diagnostics) ToHCL() hcl.Diagnostics { - if len(d) == 0 { +func (diags Diagnostics) ToHCL() hcl.Diagnostics { + if len(diags) == 0 { return nil } - ret := make(hcl.Diagnostics, len(d)) - for i, diag := range d { + ret := make(hcl.Diagnostics, len(diags)) + for i, diag := range diags { severity := diag.Severity() desc := diag.Description() source := diag.Source() diff --git a/tools/terraform-bundle/README.md b/tools/terraform-bundle/README.md index 916b33c1d..dc3879627 100644 --- a/tools/terraform-bundle/README.md +++ b/tools/terraform-bundle/README.md @@ -124,7 +124,7 @@ bundles contain the same core Terraform version. ## Custom Plugins To include custom plugins in the bundle file, create a local directory named -`./plugins` and put all the plugins you want to include there, under the +`./.plugins` and put all the plugins you want to include there, under the required [sub directory](#plugins-directory-layout). Optionally, you can use the `-plugin-dir` flag to specify a location where to find the plugins. To be recognized as a valid plugin, the file must have a name of the form @@ -135,10 +135,10 @@ Typically this will be `linux` and `amd64`. 
### Plugins Directory Layout To include custom plugins in the bundle file, you must specify a "source" attribute in the configuration and place the plugin in the appropriate -subdirectory under `./plugins`. The directory must have the following layout: +subdirectory under `./.plugins`. The directory must have the following layout: ``` -./plugins/$SOURCEHOST/$SOURCENAMESPACE/$NAME/$VERSION/$OS_$ARCH/ +./.plugins/$SOURCEHOST/$SOURCENAMESPACE/$NAME/$VERSION/$OS_$ARCH/ ``` When installing custom plugins, you may choose any arbitrary identifier for the @@ -158,7 +158,7 @@ providers { The binary must be placed in the following directory: ``` -./plugins/example.com/myorg/customplugin/0.1/linux_amd64/ +./.plugins/example.com/myorg/customplugin/0.1/linux_amd64/ ``` ## Provider Resolution Behavior @@ -171,9 +171,7 @@ Therefore if automatic installation is not desired, it is important to ensure that version constraints within Terraform configurations do not exclude all of the versions available from the bundle. If a suitable version cannot be found in the bundle, Terraform _will_ attempt to satisfy that dependency by -automatic installation from the official repository. If you want -`terraform init` to explicitly fail instead of contacting the repository, pass -the `-get-plugins=false` option. +automatic installation from the official repository. For full details about provider resolution, see [How Terraform Works: Plugin Discovery](https://www.terraform.io/docs/extend/how-terraform-works.html#discovery). 
diff --git a/tools/terraform-bundle/package.go b/tools/terraform-bundle/package.go index 213059961..35eb25bbd 100644 --- a/tools/terraform-bundle/package.go +++ b/tools/terraform-bundle/package.go @@ -130,7 +130,7 @@ func (c *PackageCommand) Run(args []string) int { localSource := getproviders.NewFilesystemMirrorSource(absPluginDir) if available, err := localSource.AllAvailablePackages(); err == nil { for found := range available { - c.ui.Info(fmt.Sprintf("Found provider %q in %q. p", found.String(), pluginDir)) + c.ui.Info(fmt.Sprintf("Found provider %q in %q.", found.String(), pluginDir)) foundLocally[found] = struct{}{} } } diff --git a/version.go b/version.go index 36d16cb28..a54331647 100644 --- a/version.go +++ b/version.go @@ -4,9 +4,6 @@ import ( "github.com/hashicorp/terraform/version" ) -// The git commit that was compiled. This will be filled in by the compiler. -var GitCommit string - var Version = version.Version var VersionPrerelease = version.Prerelease diff --git a/version/version.go b/version/version.go index 8dd290be7..cd5865490 100644 --- a/version/version.go +++ b/version/version.go @@ -11,7 +11,7 @@ import ( ) // The main version number that is being run at the moment. -var Version = "0.14.0" +var Version = "0.15.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release diff --git a/website/docs/backends/config.html.md b/website/docs/backends/config.html.md deleted file mode 100644 index 6eddd3ffd..000000000 --- a/website/docs/backends/config.html.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -layout: "docs" -page_title: "Backends: Configuration" -sidebar_current: "docs-backends-config" -description: |- - Backends are configured directly in Terraform files in the `terraform` section. ---- - -# Backend Configuration - -Backends are configured directly in Terraform files in the `terraform` -section. 
After configuring a backend, it has to be -[initialized](/docs/backends/init.html). - -Below, we show a complete example configuring the "consul" backend: - -```hcl -terraform { - backend "consul" { - address = "demo.consul.io" - scheme = "https" - path = "example_app/terraform_state" - } -} -``` - -You specify the backend type as a key to the `backend` stanza. Within the -stanza are backend-specific configuration keys. The list of supported backends -and their configuration details can be found [here](/docs/backends/types/index.html). - -Only one backend may be specified and the configuration **may not contain -interpolations**. Terraform will validate this. - -## First Time Configuration - -When configuring a backend for the first time (moving from no defined backend -to explicitly configuring one), Terraform will give you the option to migrate -your state to the new backend. This lets you adopt backends without losing -any existing state. - -To be extra careful, we always recommend manually backing up your state -as well. You can do this by simply copying your `terraform.tfstate` file -to another location. The initialization process should create a backup -as well, but it never hurts to be safe! - -Configuring a backend for the first time is no different than changing -a configuration in the future: create the new configuration and run -`terraform init`. Terraform will guide you the rest of the way. - -## Partial Configuration - -You do not need to specify every required argument in the backend configuration. -Omitting certain arguments may be desirable to avoid storing secrets, such as -access keys, within the main configuration. When some or all of the arguments -are omitted, we call this a _partial configuration_. - -With a partial configuration, the remaining configuration arguments must be -provided as part of -[the initialization process](/docs/backends/init.html#backend-initialization). 
-There are several ways to supply the remaining arguments: - - * **Interactively**: Terraform will interactively ask you for the required - values, unless interactive input is disabled. Terraform will not prompt for - optional values. - - * **File**: A [backend configuration file](#backend-configuration-file) may be specified via the - `init` command line. To specify a file, use the `-backend-config=PATH` - option when running `terraform init`. If the file contains secrets it may be - kept in a secure data store, such as [Vault](https://www.vaultproject.io/), - in which case it must be downloaded to the local disk before running - Terraform. - - * **Command-line key/value pairs**: Key/value pairs can be specified via the - `init` command line. Note that many shells retain command-line flags in a - history file, so this isn't recommended for secrets. To specify a single - key/value pair, use the `-backend-config="KEY=VALUE"` option when running - `terraform init`. - -If backend settings are provided in multiple locations, the top-level -settings are merged such that any command-line options override the settings -in the main configuration and then the command-line options are processed -in order, with later options overriding values set by earlier options. - -The final, merged configuration is stored on disk in the `.terraform` -directory, which should be ignored from version control. This means that -sensitive information can be omitted from version control, but it will be -present in plain text on local disk when running Terraform. - -When using partial configuration, Terraform requires at a minimum that -an empty backend configuration is specified in one of the root Terraform -configuration files, to specify the backend type. 
For example: - -```hcl -terraform { - backend "consul" {} -} -``` - -## Backend Configuration File -A backend configuration file has the contents of the `backend` block as -top-level attributes, without the need to wrap it in another `terraform` -or `backend` block: - -```hcl -address = "demo.consul.io" -path = "example_app/terraform_state" -scheme = "https" -``` - -The same settings can alternatively be specified on the command line as -follows: - -``` -$ terraform init \ - -backend-config="address=demo.consul.io" \ - -backend-config="path=example_app/terraform_state" \ - -backend-config="scheme=https" -``` - -## Changing Configuration - -You can change your backend configuration at any time. You can change -both the configuration itself as well as the type of backend (for example -from "consul" to "s3"). - -Terraform will automatically detect any changes in your configuration -and request a [reinitialization](/docs/backends/init.html). As part of -the reinitialization process, Terraform will ask if you'd like to migrate -your existing state to the new configuration. This allows you to easily -switch from one backend to another. - -If you're using multiple [workspaces](/docs/state/workspaces.html), -Terraform can copy all workspaces to the destination. If Terraform detects -you have multiple workspaces, it will ask if this is what you want to do. - -If you're just reconfiguring the same backend, Terraform will still ask if you -want to migrate your state. You can respond "no" in this scenario. - -## Unconfiguring a Backend - -If you no longer want to use any backend, you can simply remove the -configuration from the file. Terraform will detect this like any other -change and prompt you to [reinitialize](/docs/backends/init.html). - -As part of the reinitialization, Terraform will ask if you'd like to migrate -your state back down to normal local state. Once this is complete then -Terraform is back to behaving as it does by default. 
diff --git a/website/docs/backends/index.html.md b/website/docs/backends/index.html.md deleted file mode 100644 index f8fb843c1..000000000 --- a/website/docs/backends/index.html.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -layout: "docs" -page_title: "Backends" -sidebar_current: "docs-backends-index" -description: |- - A "backend" in Terraform determines how state is loaded and how an operation such as `apply` is executed. This abstraction enables non-local file state storage, remote execution, etc. ---- - -# Backends - -A "backend" in Terraform determines how state is loaded and how an operation -such as `apply` is executed. This abstraction enables non-local file state -storage, remote execution, etc. - -By default, Terraform uses the "local" backend, which is the normal behavior -of Terraform you're used to. This is the backend that was being invoked -throughout the [introduction](/intro/index.html). - -Here are some of the benefits of backends: - - * **Working in a team**: Backends can store their state remotely and - protect that state with locks to prevent corruption. Some backends - such as Terraform Cloud even automatically store a history of - all state revisions. - - * **Keeping sensitive information off disk**: State is retrieved from - backends on demand and only stored in memory. If you're using a backend - such as Amazon S3, the only location the state ever is persisted is in - S3. - - * **Remote operations**: For larger infrastructures or certain changes, - `terraform apply` can take a long, long time. Some backends support - remote operations which enable the operation to execute remotely. You can - then turn off your computer and your operation will still complete. Paired - with remote state storage and locking above, this also helps in team - environments. - -**Backends are completely optional**. You can successfully use Terraform without -ever having to learn or use backends. However, they do solve pain points that -afflict teams at a certain scale. 
If you're an individual, you can likely -get away with never using backends. - -Even if you only intend to use the "local" backend, it may be useful to -learn about backends since you can also change the behavior of the local -backend. diff --git a/website/docs/backends/init.html.md b/website/docs/backends/init.html.md deleted file mode 100644 index cbba0eb7d..000000000 --- a/website/docs/backends/init.html.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -layout: "docs" -page_title: "Backends: Init" -sidebar_current: "docs-backends-init" -description: |- - Terraform must initialize any configured backend before use. This can be done by simply running `terraform init`. ---- - -# Backend Initialization - -Terraform must initialize any configured backend before use. This can be -done by simply running `terraform init`. - -The `terraform init` command should be run by any member of your team on -any Terraform configuration as a first step. It is safe to execute multiple -times and performs all the setup actions required for a Terraform environment, -including initializing the backend. - -The `init` command must be called: - - * On any new environment that configures a backend - * On any change of the backend configuration (including type of backend) - * On removing backend configuration completely - -You don't need to remember these exact cases. Terraform will detect when -initialization is required and error in that situation. Terraform doesn't -auto-initialize because it may require additional information from the user, -perform state migrations, etc. - -The `init` command will do more than just initialize the backend. Please see -the [init documentation](/docs/commands/init.html) for more information. 
diff --git a/website/docs/backends/operations.html.md b/website/docs/backends/operations.html.md deleted file mode 100644 index 0272c856b..000000000 --- a/website/docs/backends/operations.html.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -layout: "docs" -page_title: "Backends: Remote Operations (plan, apply, etc.)" -sidebar_current: "docs-backends-operations" -description: |- - Some backends support the ability to run operations (`refresh`, `plan`, `apply`, etc.) remotely. Terraform will continue to look and behave as if they're running locally while they in fact run on a remote machine. ---- - -# Remote Operations (plan, apply, etc.) - -Most backends run all operations on the local system — although Terraform stores -its state remotely with these backends, it still executes its logic locally and -makes API requests directly from the system where it was invoked. - -This is simple to understand and work with, but when many people are -collaborating on the same Terraform configurations, it requires everyone's -execution environment to be similar. This includes sharing access to -infrastructure provider credentials, keeping Terraform versions in sync, -keeping Terraform variables in sync, and installing any extra software required -by Terraform providers. This becomes more burdensome as teams get larger. - -Some backends can run operations (`plan`, `apply`, etc.) on a remote machine, -while appearing to execute locally. This enables a more consistent execution -environment and more powerful access controls, without disrupting workflows -for users who are already comfortable with running Terraform. - -Currently, [the `remote` backend](./types/remote.html) is the only backend to -support remote operations, and [Terraform Cloud](/docs/cloud/index.html) -is the only remote execution environment that supports it. 
For more information, see: - -- [The `remote` backend](./types/remote.html) -- [Terraform Cloud's CLI-driven run workflow](/docs/cloud/run/cli.html) diff --git a/website/docs/backends/types/gcs.html.md b/website/docs/backends/types/gcs.html.md deleted file mode 100644 index b1fb6f157..000000000 --- a/website/docs/backends/types/gcs.html.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -layout: "backend-types" -page_title: "Backend Type: gcs" -sidebar_current: "docs-backends-types-standard-gcs" -description: |- - Terraform can store the state remotely, making it easier to version and work with in a team. ---- - -# gcs - -**Kind: Standard (with locking)** - -Stores the state as an object in a configurable prefix in a pre-existing bucket on [Google Cloud Storage](https://cloud.google.com/storage/) (GCS). -This backend also supports [state locking](/docs/state/locking.html). The bucket must exist prior to configuring the backend. - -~> **Warning!** It is highly recommended that you enable -[Object Versioning](https://cloud.google.com/storage/docs/object-versioning) -on the GCS bucket to allow for state recovery in the case of accidental deletions and human error. - -## Example Configuration - -```hcl -terraform { - backend "gcs" { - bucket = "tf-state-prod" - prefix = "terraform/state" - } -} -``` - -## Data Source Configuration - -```hcl -data "terraform_remote_state" "foo" { - backend = "gcs" - config = { - bucket = "terraform-state" - prefix = "prod" - } -} - -resource "template_file" "bar" { - template = "${greeting}" - - vars { - greeting = "${data.terraform_remote_state.foo.greeting}" - } -} -``` - -## Configuration variables - -The following configuration options are supported: - - * `bucket` - (Required) The name of the GCS bucket. This name must be - globally unique. For more information, see [Bucket Naming - Guidelines](https://cloud.google.com/storage/docs/bucketnaming.html#requirements). 
- * `credentials` / `GOOGLE_BACKEND_CREDENTIALS` / `GOOGLE_CREDENTIALS` - - (Optional) Local path to Google Cloud Platform account credentials in JSON - format. If unset, [Google Application Default - Credentials](https://developers.google.com/identity/protocols/application-default-credentials) - are used. The provided credentials need to have the - `devstorage.read_write` scope and `WRITER` permissions on the bucket. - **Warning**: if using the Google Cloud Platform provider as well, it will - also pick up the `GOOGLE_CREDENTIALS` environment variable. - * `access_token` - (Optional) A temporary [OAuth 2.0 access token] obtained - from the Google Authorization server, i.e. the `Authorization: Bearer` token - used to authenticate HTTP requests to GCP APIs. This is an alternative to - `credentials`. If both are specified, `access_token` will be used over the - `credentials` field. - * `prefix` - (Optional) GCS prefix inside the bucket. Named states for - workspaces are stored in an object called `/.tfstate`. - * `path` - (Deprecated) GCS path to the state file of the default state. For - backwards compatibility only, use `prefix` instead. - * `encryption_key` / `GOOGLE_ENCRYPTION_KEY` - (Optional) A 32 byte base64 - encoded 'customer supplied encryption key' used to encrypt all state. For - more information see [Customer Supplied Encryption - Keys](https://cloud.google.com/storage/docs/encryption#customer-supplied). diff --git a/website/docs/backends/types/index.html.md b/website/docs/backends/types/index.html.md deleted file mode 100644 index 4034a0ae6..000000000 --- a/website/docs/backends/types/index.html.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -layout: "backend-types" -page_title: "Backend: Supported Backend Types" -sidebar_current: "docs-backends-types-index" -description: |- - Terraform can store the state remotely, making it easier to version and work with in a team. 
---- - -# Backend Types - -This section documents the various backend types supported by Terraform. -If you're not familiar with backends, please -[read the sections about backends](/docs/backends/index.html) first. - -Backends may support differing levels of features in Terraform. We differentiate -these by calling a backend either **standard** or **enhanced**. All backends -must implement **standard** functionality. These are defined below: - - * **Standard**: State management, functionality covered in - [State Storage & Locking](/docs/backends/state.html) - - * **Enhanced**: Everything in standard plus - [remote operations](/docs/backends/operations.html). - -The backends are separated in the left by standard and enhanced. diff --git a/website/docs/cli-index.html.md b/website/docs/cli-index.html.md deleted file mode 100644 index 9fc22207d..000000000 --- a/website/docs/cli-index.html.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -layout: "docs" -page_title: "Documentation" -sidebar_current: "docs-home" -description: |- - Documentation for Terraform's core open source features, including the - configuration language, the commands, and the main Terraform providers. ---- - -# Terraform CLI Documentation - -Welcome to the Terraform CLI documentation! - -## What's in This Section of the Docs? - -This section contains reference documentation for Terraform's core open source -features, including the -[configuration language](/docs/configuration/index.html), the -[command-line tools](/docs/commands/index.html), and the main -[Terraform providers](/docs/providers/index.html). Use the navigation sidebar -to browse the various subsections. - -## Who is This For? - -The Terraform CLI docs are relevant to _all Terraform users,_ including open -source users and Terraform Cloud users. - -Since these docs are reference material, they are mainly written for -_intermediate and advanced users,_ who need to find complete and detailed -information quickly. 
- -- **New user?** Try the - [Get Started collection](https://learn.hashicorp.com/collections/terraform/aws-get-started?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) - at HashiCorp Learn, then return - here once you've used Terraform to manage some simple resources. -- **Curious about Terraform?** See [Introduction to Terraform](/intro/index.html) - for a broad overview of what Terraform is and why people use it. - -## What's Elsewhere? - -This is not the only section of the Terraform docs! You can find out more at the -[Terraform docs home page](/docs/index.html), or you can jump between sections -using the "Other Docs" area of the navigation sidebar. diff --git a/website/docs/cli/auth/index.html.md b/website/docs/cli/auth/index.html.md new file mode 100644 index 000000000..794f23119 --- /dev/null +++ b/website/docs/cli/auth/index.html.md @@ -0,0 +1,29 @@ +--- +layout: "docs" +page_title: "Authentication - Terraform CLI" +--- + +# CLI Authentication + +> **Hands-on:** Try the [Authenticate the CLI with Terraform Cloud](https://learn.hashicorp.com/tutorials/terraform/cloud-login?in=terraform/cloud&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. + +[Terraform Cloud](/docs/cloud/index.html) and +[Terraform Enterprise](/docs/enterprise/index.html) are platforms that perform +Terraform runs to provision infrastructure, offering a collaboration-focused +environment that makes it easier for teams to use Terraform together. (For +expediency, the content below refers to both products as "Terraform Cloud.") + +Terraform CLI integrates with Terraform Cloud in several ways — it can be a +front-end for [CLI-driven runs](/docs/cloud/run/cli.html) in Terraform Cloud, +and can also use Terraform Cloud as a state backend and a private module +registry. All of these integrations require you to authenticate Terraform CLI +with your Terraform Cloud account. 
+ +The best way to handle CLI authentication is with the `login` and `logout` +commands, which help automate the process of getting an API token for your +Terraform Cloud user account. + +For details, see: + +- [The `terraform login` command](/docs/cli/commands/login.html) +- [The `terraform logout` command](/docs/cli/commands/logout.html) diff --git a/website/docs/cli/code/index.html.md b/website/docs/cli/code/index.html.md new file mode 100644 index 000000000..01dba636c --- /dev/null +++ b/website/docs/cli/code/index.html.md @@ -0,0 +1,43 @@ +--- +layout: "docs" +page_title: "Writing and Modifying Code - Terraform CLI" +--- + +# Writing and Modifying Terraform Code + +The [Terraform language](/docs/language/index.html) is Terraform's primary +user interface, and all of Terraform's workflows rely on configurations written +in the Terraform language. + +Terraform CLI includes several commands to make Terraform code more convenient +to work with. Integrating these commands into your editing workflow can +potentially save you time and effort. + +- [The `terraform console` command](/docs/cli/commands/console.html) starts an + interactive shell for evaluating Terraform + [expressions](/docs/language/expressions/index.html), which can be a faster way + to verify that a particular resource argument results in the value you expect. + + +- [The `terraform fmt` command](/docs/cli/commands/fmt.html) rewrites Terraform + configuration files to a canonical format and style, so you don't have to + waste time making minor adjustments for readability and consistency. It works + well as a pre-commit hook in your version control system. + +- [The `terraform validate` command](/docs/cli/commands/validate.html) validates the + syntax and arguments of the Terraform configuration files in a directory, + including argument and attribute names and types for resources and modules. 
+ The `plan` and `apply` commands automatically validate a configuration before + performing any other work, so `validate` isn't a crucial part of the core + workflow, but it can be very useful as a pre-commit hook or as part of a + continuous integration pipeline. + +- [The `0.13upgrade` command](/docs/cli/commands/0.13upgrade.html) and + [the `0.12upgrade` command](/docs/cli/commands/0.12upgrade.html) can automatically + modify the configuration files in a Terraform module to help deal with major + syntax changes that occurred in the 0.13 and 0.12 releases of Terraform. Both + of these commands are only available in the Terraform version they are + associated with, and you are expected to upgrade older code to be compatible + with 0.12 before attempting to make it compatible with 0.13. For more detailed + information about updating code for new Terraform versions, see the [upgrade + guides](/upgrade-guides/index.html) in the Terraform language docs. diff --git a/website/docs/commands/0.12upgrade.html.markdown b/website/docs/cli/commands/0.12upgrade.html.md similarity index 96% rename from website/docs/commands/0.12upgrade.html.markdown rename to website/docs/cli/commands/0.12upgrade.html.md index 68d45fef9..59ff0de44 100644 --- a/website/docs/commands/0.12upgrade.html.markdown +++ b/website/docs/cli/commands/0.12upgrade.html.md @@ -71,13 +71,13 @@ the change. Once upgraded the configuration will no longer be compatible with Terraform v0.11 and earlier. When upgrading a shared module that is called from multiple configurations, you may need to -[fix existing configurations to a previous version](/docs/configuration/modules.html#module-versions) +[fix existing configurations to a previous version](/docs/language/modules/syntax.html#version) to allow for a gradual upgrade. 
If the module is published via [a Terraform registry](/docs/registry/), assign a new _major_ version number to the upgraded module source to represent the fact that this is a breaking change for v0.11 callers. If a module is installed directly from a version control system such as Git, -[use specific revisions](https://www.terraform.io/docs/modules/sources.html#selecting-a-revision) +[use specific revisions](https://www.terraform.io/docs/language/modules/sources.html#selecting-a-revision) to control which version is used by which caller. The command-line options are all optional. The available options are: diff --git a/website/docs/commands/0.13upgrade.html.markdown b/website/docs/cli/commands/0.13upgrade.html.md similarity index 97% rename from website/docs/commands/0.13upgrade.html.markdown rename to website/docs/cli/commands/0.13upgrade.html.md index 9340ebb40..52ff42067 100644 --- a/website/docs/commands/0.13upgrade.html.markdown +++ b/website/docs/cli/commands/0.13upgrade.html.md @@ -23,7 +23,7 @@ providers are in use for a module, detect the source address for those providers where possible, and record this information in a [`required_providers` block][required-providers]. -[required-providers]: /docs/configuration/terraform.html#specifying-required-provider-versions +[required-providers]: /docs/language/providers/requirements.html ~> Note: the command ignores `.tf.json` files and override files in the module. diff --git a/website/docs/commands/apply.html.markdown b/website/docs/cli/commands/apply.html.md similarity index 86% rename from website/docs/commands/apply.html.markdown rename to website/docs/cli/commands/apply.html.md index ffcdc135e..3e1430f33 100644 --- a/website/docs/commands/apply.html.markdown +++ b/website/docs/cli/commands/apply.html.md @@ -56,27 +56,27 @@ The command-line flags are all optional. The list of available flags are: apply. * `-state=path` - Path to the state file. Defaults to "terraform.tfstate". 
- Ignored when [remote state](/docs/state/remote.html) is used. This setting + Ignored when [remote state](/docs/language/state/remote.html) is used. This setting does not persist and other commands, such as init, may not be aware of the alternate statefile. To configure an alternate statefile path which is - available to all terraform commands, use the [local backend](/docs/backends/types/local.html). + available to all terraform commands, use the [local backend](/docs/language/settings/backends/local.html). * `-state-out=path` - Path to write updated state file. By default, the `-state` path will be used. Ignored when - [remote state](/docs/state/remote.html) is used. + [remote state](/docs/language/state/remote.html) is used. * `-target=resource` - A [Resource - Address](/docs/internals/resource-addressing.html) to target. For more + Address](/docs/cli/state/resource-addressing.html) to target. For more information, see - [the targeting docs from `terraform plan`](/docs/commands/plan.html#resource-targeting). + [the targeting docs from `terraform plan`](/docs/cli/commands/plan.html#resource-targeting). * `-var 'foo=bar'` - Set a variable in the Terraform configuration. This flag can be set multiple times. Variable values are interpreted as - [HCL](/docs/configuration/syntax.html#HCL), so list and map values can be - specified via this flag. + [literal expressions](/docs/language/expressions/types.html) in the + Terraform language, so list and map values can be specified via this flag. * `-var-file=foo` - Set variables in the Terraform configuration from - a [variable file](/docs/configuration/variables.html#variable-files). If + a [variable file](/docs/language/values/variables.html#variable-definitions-tfvars-files). If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, they will be automatically loaded. `terraform.tfvars` is loaded first and the `.auto.tfvars` files after in alphabetical order. 
Any files @@ -92,7 +92,7 @@ that directory as the root module instead of the current working directory. That usage is still supported in Terraform v0.14, but is now deprecated and we plan to remove it in Terraform v0.15. If your workflow relies on overriding the root module directory, use -[the `-chdir` global option](./#switching-working-directory-with--chdir) +[the `-chdir` global option](./#switching-working-directory-with-chdir) instead, which works across all commands and makes Terraform consistently look in the given directory for all files it would normaly read or write in the current working directory. @@ -100,6 +100,6 @@ current working directory. If your previous use of this legacy pattern was also relying on Terraform writing the `.terraform` subdirectory into the current working directory even though the root module directory was overridden, use -[the `TF_DATA_DIR` environment variable](environment-variables.html#TF_DATA_DIR) +[the `TF_DATA_DIR` environment variable](/docs/cli/config/environment-variables.html#tf_data_dir) to direct Terraform to write the `.terraform` directory to a location other than the current working directory. diff --git a/website/docs/commands/console.html.markdown b/website/docs/cli/commands/console.html.md similarity index 77% rename from website/docs/commands/console.html.markdown rename to website/docs/cli/commands/console.html.md index 511f85164..7d300078c 100644 --- a/website/docs/commands/console.html.markdown +++ b/website/docs/cli/commands/console.html.md @@ -10,27 +10,27 @@ description: |- # Command: console The `terraform console` command provides an interactive console for -evaluating [expressions](/docs/configuration/expressions.html). +evaluating [expressions](/docs/language/expressions/index.html). ## Usage Usage: `terraform console [options]` This command provides an interactive command-line console for evaluating and -experimenting with [expressions](/docs/configuration/expressions.html). 
+experimenting with [expressions](/docs/language/expressions/index.html). This is useful for testing interpolations before using them in configurations, and for interacting with any values currently saved in -[state](/docs/state/index.html). +[state](/docs/language/state/index.html). If the current state is empty or has not yet been created, the console can be used to experiment with the expression syntax and -[built-in functions](/docs/configuration/functions.html). +[built-in functions](/docs/language/functions/index.html). The supported options are: * `-state=path` - Path to a local state file. Expressions will be evaluated using values from this state file. If not specified, the state associated - with the current [workspace](/docs/state/workspaces.html) is used. + with the current [workspace](/docs/language/state/workspaces.html) is used. You can close the console with the `exit` command or by pressing Control-C or Control-D. @@ -50,6 +50,6 @@ $ echo "1 + 5" | terraform console ## Remote State -If [remote state](/docs/state/remote.html) is used by the current backend, +If [remote state](/docs/language/state/remote.html) is used by the current backend, Terraform will read the state for the current workspace from the backend before evaluating any expressions. diff --git a/website/docs/commands/destroy.html.markdown b/website/docs/cli/commands/destroy.html.md similarity index 87% rename from website/docs/commands/destroy.html.markdown rename to website/docs/cli/commands/destroy.html.md index f07c357a4..dd490aa63 100644 --- a/website/docs/commands/destroy.html.markdown +++ b/website/docs/cli/commands/destroy.html.md @@ -19,13 +19,13 @@ Infrastructure managed by Terraform will be destroyed. This will ask for confirmation before destroying. 
This command accepts all the arguments and options that the [apply -command](/docs/commands/apply.html) accepts, with the exception of a plan file +command](/docs/cli/commands/apply.html) accepts, with the exception of a plan file argument. If `-auto-approve` is set, then the destroy confirmation will not be shown. The `-target` flag, instead of affecting "dependencies" will instead also -destroy any resources that _depend on_ the target(s) specified. For more information, see [the targeting docs from `terraform plan`](/docs/commands/plan.html#resource-targeting). +destroy any resources that _depend on_ the target(s) specified. For more information, see [the targeting docs from `terraform plan`](/docs/cli/commands/plan.html#resource-targeting). The behavior of any `terraform destroy` command can be previewed at any time with an equivalent `terraform plan -destroy` command. diff --git a/website/docs/commands/env.html.markdown b/website/docs/cli/commands/env.html.md similarity index 78% rename from website/docs/commands/env.html.markdown rename to website/docs/cli/commands/env.html.md index e1ec830e7..3d1183d29 100644 --- a/website/docs/commands/env.html.markdown +++ b/website/docs/cli/commands/env.html.md @@ -9,5 +9,5 @@ description: |- # Command: env The `terraform env` command is deprecated. -[The `terraform workspace` command](/docs/commands/workspace/) +[The `terraform workspace` command](/docs/cli/commands/workspace/index.html) should be used instead. diff --git a/website/docs/commands/fmt.html.markdown b/website/docs/cli/commands/fmt.html.md similarity index 54% rename from website/docs/commands/fmt.html.markdown rename to website/docs/cli/commands/fmt.html.md index 2a3950193..b07a97df7 100644 --- a/website/docs/commands/fmt.html.markdown +++ b/website/docs/cli/commands/fmt.html.md @@ -10,7 +10,7 @@ description: |- The `terraform fmt` command is used to rewrite Terraform configuration files to a canonical format and style. 
This command applies a subset of -the [Terraform language style conventions](/docs/configuration/style.html), +the [Terraform language style conventions](/docs/language/syntax/style.html), along with other minor adjustments for readability. Other Terraform commands that generate Terraform configuration will produce @@ -22,6 +22,28 @@ after upgrading Terraform we recommend to proactively run `terraform fmt` on your modules along with any other changes you are making to adopt the new version. +We don't consider new formatting rules in `terraform fmt` to be a breaking +change in new versions of Terraform, but we do aim to minimize changes for +configurations that are already following the style examples shown in the +Terraform documentation. When adding new formatting rules, they will usually +aim to apply more of the rules already shown in the configuration examples +in the documentation, and so we recommend following the documented style even +for decisions that `terraform fmt` doesn't yet apply automatically. + +Formatting decisions are always subjective and so you might disagree with the +decisions that `terraform fmt` makes. This command is intentionally opinionated +and has no customization options because its primary goal is to encourage +consistency of style between different Terraform codebases, even though the +chosen style can never be everyone's favorite. + +We recommend that you follow the style conventions applied by `terraform fmt` +when writing Terraform modules, but if you find the results particularly +objectionable then you may choose not to use this command, and possibly choose +to use a third-party formatting tool instead. If you choose to use a +third-party tool then you should also run it on files that are generated +automatically by Terraform, to get consistency between your hand-written files +and the generated files. 
+ ## Usage Usage: `terraform fmt [options] [DIR]` diff --git a/website/docs/commands/force-unlock.html.markdown b/website/docs/cli/commands/force-unlock.html.md similarity index 100% rename from website/docs/commands/force-unlock.html.markdown rename to website/docs/cli/commands/force-unlock.html.md diff --git a/website/docs/commands/get.html.markdown b/website/docs/cli/commands/get.html.md similarity index 89% rename from website/docs/commands/get.html.markdown rename to website/docs/cli/commands/get.html.md index 80cbf4a5e..b847fecd6 100644 --- a/website/docs/commands/get.html.markdown +++ b/website/docs/cli/commands/get.html.md @@ -9,7 +9,7 @@ description: |- # Command: get The `terraform get` command is used to download and update -[modules](/docs/modules/index.html) mentioned in the root module. +[modules](/docs/language/modules/develop/index.html) mentioned in the root module. ## Usage diff --git a/website/docs/commands/graph.html.markdown b/website/docs/cli/commands/graph.html.md similarity index 82% rename from website/docs/commands/graph.html.markdown rename to website/docs/cli/commands/graph.html.md index 04c286b03..dea3d441b 100644 --- a/website/docs/commands/graph.html.markdown +++ b/website/docs/cli/commands/graph.html.md @@ -18,14 +18,14 @@ The output is in the DOT format, which can be used by Usage: `terraform graph [options]` -Outputs the visual dependency graph of Terraform resources represented by the -configuration in the current working directory. +Outputs the visual execution graph of Terraform resources according to +either the current configuration or an execution plan. The graph is outputted in DOT format. The typical program that can read this format is GraphViz, but many web services are also available to read this format. -The -type flag can be used to control the type of graph shown. Terraform +The `-type` flag can be used to control the type of graph shown. Terraform creates different graphs for different operations. 
See the options below for the list of types supported. The default type is "plan" if a configuration is given, and "apply" if a plan file is passed as an @@ -33,6 +33,9 @@ argument. Options: +* `-plan=tfplan` - Render graph using the specified plan file instead of the + configuration in the current directory. + * `-draw-cycles` - Highlight any cycles in the graph with colored edges. This helps when diagnosing cycle errors. @@ -48,7 +51,7 @@ The output of `terraform graph` is in the DOT format, which can easily be converted to an image by making use of `dot` provided by GraphViz: -```shell +```shellsession $ terraform graph | dot -Tsvg > graph.svg ``` diff --git a/website/docs/commands/import.html.md b/website/docs/cli/commands/import.html.md similarity index 87% rename from website/docs/commands/import.html.md rename to website/docs/cli/commands/import.html.md index 7ae356818..1406404eb 100644 --- a/website/docs/commands/import.html.md +++ b/website/docs/cli/commands/import.html.md @@ -11,7 +11,7 @@ description: |- > **Hands-on:** Try the [Import Terraform Configuration](https://learn.hashicorp.com/tutorials/terraform/state-import?in=terraform/state&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. The `terraform import` command is used to -[import existing resources](/docs/import/index.html) +[import existing resources](/docs/cli/import/index.html) into Terraform. ## Usage @@ -21,7 +21,7 @@ Usage: `terraform import [options] ADDRESS ID` Import will find the existing resource from ID and import it into your Terraform state at the given ADDRESS. -ADDRESS must be a valid [resource address](/docs/internals/resource-addressing.html). +ADDRESS must be a valid [resource address](/docs/cli/state/resource-addressing.html). Because any resource address is valid, the import command can import resources into modules as well as directly into the root of your state. @@ -37,7 +37,7 @@ itself having created all objects. 
If you import existing objects into Terraform be careful to import each remote object to only one Terraform resource address. If you import the same object multiple times, Terraform may exhibit unwanted behavior. For more information on this assumption, see -[the State section](/docs/state/). +[the State section](/docs/language/state/index.html). The command-line flags are all optional. The list of available flags are: @@ -75,11 +75,12 @@ in the configuration for the target resource, and that is the best behavior in m * `-var 'foo=bar'` - Set a variable in the Terraform configuration. This flag can be set multiple times. Variable values are interpreted as - [HCL](/docs/configuration/syntax.html#HCL), so list and map values can be - specified via this flag. This is only useful with the `-config` flag. + [literal expressions](/docs/language/expressions/types.html) in the + Terraform language, so list and map values can be specified via this flag. + This is only useful with the `-config` flag. * `-var-file=foo` - Set variables in the Terraform configuration from - a [variable file](/docs/configuration/variables.html#variable-files). If + a [variable file](/docs/language/values/variables.html#variable-definitions-tfvars-files). If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, they will be automatically loaded. `terraform.tfvars` is loaded first and the `.auto.tfvars` files after in alphabetical order. Any files @@ -87,6 +88,11 @@ in the configuration for the target resource, and that is the best behavior in m the working directory. This flag can be used multiple times. This is only useful with the `-config` flag. +* `-ignore-remote-version` - When using the enhanced remote backend with + Terraform Cloud, continue even if remote and local Terraform versions differ. + This may result in an unusable Terraform Cloud workspace, and should be used + with extreme caution. 
+ ## Provider Configuration Terraform will attempt to load configuration files that configure the @@ -133,7 +139,7 @@ $ terraform import module.foo.aws_instance.bar i-abcd1234 ## Example: Import into Resource configured with count The example below will import an AWS instance into the first instance of the `aws_instance` resource named `baz` configured with -[`count`](/docs/configuration/resources.html#count-multiple-resource-instances-by-count): +[`count`](/docs/language/meta-arguments/count.html): ```shell $ terraform import 'aws_instance.baz[0]' i-abcd1234 @@ -142,7 +148,7 @@ $ terraform import 'aws_instance.baz[0]' i-abcd1234 ## Example: Import into Resource configured with for_each The example below will import an AWS instance into the `"example"` instance of the `aws_instance` resource named `baz` configured with -[`for_each`](/docs/configuration/resources.html#for_each-multiple-resource-instances-defined-by-a-map-or-set-of-strings): +[`for_each`](/docs/language/meta-arguments/for_each.html): Linux, Mac OS, and UNIX: diff --git a/website/docs/commands/index.html.markdown b/website/docs/cli/commands/index.html.md similarity index 96% rename from website/docs/commands/index.html.markdown rename to website/docs/cli/commands/index.html.md index 2f8ee2a4e..d579a951c 100644 --- a/website/docs/commands/index.html.markdown +++ b/website/docs/cli/commands/index.html.md @@ -1,12 +1,12 @@ --- layout: "docs" -page_title: "Commands" +page_title: "Basic CLI Features" sidebar_current: "docs-commands" description: |- Main usage information for the Terraform CLI tool. --- -# Terraform Commands (CLI) +# Basic CLI Features > **Hands-on:** Try the [Terraform: Get Started](https://learn.hashicorp.com/collections/terraform/aws-get-started?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn. @@ -101,7 +101,7 @@ will be read or written in the given directory instead. 
There are two exceptions where Terraform will use the original working directory even when you specify `-chdir=...`: -* Settings in the [CLI Configuration](cli-config.html) are not for a specific +* Settings in the [CLI Configuration](/docs/cli/config/config-file.html) are not for a specific subcommand and Terraform processes them before acting on the `-chdir` option. @@ -154,7 +154,7 @@ Checkpoint itself can be entirely disabled for all HashiCorp products by setting the environment variable `CHECKPOINT_DISABLE` to any non-empty value. Alternatively, settings in -[the CLI configuration file](/docs/commands/cli-config.html) can be used to +[the CLI configuration file](/docs/cli/config/config-file.html) can be used to disable checkpoint features. The following checkpoint-related settings are supported in this file: diff --git a/website/docs/commands/init.html.markdown b/website/docs/cli/commands/init.html.md similarity index 87% rename from website/docs/commands/init.html.markdown rename to website/docs/cli/commands/init.html.md index dfadce182..21ef20470 100644 --- a/website/docs/commands/init.html.markdown +++ b/website/docs/cli/commands/init.html.md @@ -75,7 +75,7 @@ activating credentials) before running `terraform init`. ## Backend Initialization During init, the root configuration directory is consulted for -[backend configuration](/docs/backends/config.html) and the chosen backend +[backend configuration](/docs/language/settings/backends/configuration.html) and the chosen backend is initialized using the given configuration settings. Re-running init with an already-initialized backend will update the working @@ -91,14 +91,14 @@ when the working directory was already previously initialized for a particular backend. 
The `-backend-config=...` option can be used for -[partial backend configuration](/docs/backends/config.html#partial-configuration), +[partial backend configuration](/docs/language/settings/backends/configuration.html#partial-configuration), in situations where the backend settings are dynamic or sensitive and so cannot be statically specified in the configuration file. ## Child Module Installation During init, the configuration is searched for `module` blocks, and the source -code for referenced [modules](/docs/modules/) is retrieved from the locations +code for referenced [modules](/docs/language/modules/develop/index.html) is retrieved from the locations given in their `source` arguments. Re-running init with modules already installed will install the sources for @@ -123,13 +123,13 @@ third-party provider registry, `terraform init` will automatically find, download, and install the necessary provider plugins. If you cannot or do not wish to install providers from their origin registries, you can customize how Terraform installs providers using -[the provider installation settings in the CLI configuration](./cli-config.html#provider-installation). +[the provider installation settings in the CLI configuration](/docs/cli/config/config-file.html#provider-installation). For more information about specifying which providers are required for each -of your modules, see [Provider Requirements](/docs/configuration/provider-requirements.html). +of your modules, see [Provider Requirements](/docs/language/providers/requirements.html). After successful installation, Terraform writes information about the selected -providers to [the dependency lock file](/docs/configuration/dependency-lock.html). +providers to [the dependency lock file](/docs/language/dependency-lock.html). You should commit this file to your version control system to ensure that when you run `terraform init` again in future Terraform will select exactly the same provider versions. 
Use the `-upgrade` option if you want Terraform @@ -142,16 +142,18 @@ You can modify `terraform init`'s plugin behavior with the following options: cause Terraform to ignore any selections recorded in the dependency lock file, and to take the newest available version matching the configured version constraints. -- `-get-plugins=false` — Skip plugin installation. If you previously ran - `terraform init` without this option, the previously-installed plugins will - remain available in your current working directory. If you have not - previously run without this option, subsequent Terraform commands will - fail due to the needed provider plugins being unavailable. +- `-get-plugins=false` — Skip plugin installation. + + -> Note: Since Terraform 0.13, this option has been superseded by the + [`provider_installation`](/docs/cli/config/config-file.html#provider-installation) and + [`plugin_cache_dir`](/docs/cli/config/config-file.html#plugin_cache_dir) settings. + It should not be used in Terraform versions 0.13+, and this option + was removed in Terraform 0.15. - `-plugin-dir=PATH` — Force plugin installation to read plugins _only_ from the specified directory, as if it had been configured as a `filesystem_mirror` in the CLI configuration. If you intend to routinely use a particular filesystem mirror then we recommend - [configuring Terraform's installation methods globally](./cli-config.html#provider-installation). + [configuring Terraform's installation methods globally](/docs/cli/config/config-file.html#provider-installation). You can use `-plugin-dir` as a one-time override for exceptional situations, such as if you are testing a local build of a provider plugin you are currently developing. @@ -177,7 +179,7 @@ that directory as the root module instead of the current working directory. That usage is still supported in Terraform v0.14, but is now deprecated and we plan to remove it in Terraform v0.15. 
If your workflow relies on overriding the root module directory, use -[the `-chdir` global option](./#switching-working-directory-with--chdir) +[the `-chdir` global option](./#switching-working-directory-with-chdir) instead, which works across all commands and makes Terraform consistently look in the given directory for all files it would normaly read or write in the current working directory. @@ -185,6 +187,6 @@ current working directory. If your previous use of this legacy pattern was also relying on Terraform writing the `.terraform` subdirectory into the current working directory even though the root module directory was overridden, use -[the `TF_DATA_DIR` environment variable](environment-variables.html#TF_DATA_DIR) +[the `TF_DATA_DIR` environment variable](/docs/cli/config/environment-variables.html#tf_data_dir) to direct Terraform to write the `.terraform` directory to a location other than the current working directory. diff --git a/website/docs/commands/login.html.markdown b/website/docs/cli/commands/login.html.md similarity index 91% rename from website/docs/commands/login.html.markdown rename to website/docs/cli/commands/login.html.md index 085781f54..8af1ef7a4 100644 --- a/website/docs/commands/login.html.markdown +++ b/website/docs/cli/commands/login.html.md @@ -15,7 +15,7 @@ API token for Terraform Cloud, Terraform Enterprise, or any other host that offe where it is possible to launch a web browser on the same host where Terraform is running. If you are running Terraform in an unattended automation scenario, you can -[configure credentials manually in the CLI configuration](https://www.terraform.io/docs/commands/cli-config.html#credentials). +[configure credentials manually in the CLI configuration](https://www.terraform.io/docs/cli/config/config-file.html#credentials). ## Usage @@ -34,7 +34,7 @@ not as desired. 
If you don't wish to store your API token in the default location, you can optionally configure a -[credentials helper program](cli-config.html#credentials-helpers) which knows +[credentials helper program](/docs/cli/config/config-file.html#credentials-helpers) which knows how to store and later retrieve credentials in some other system, such as your organization's existing secrets management system. diff --git a/website/docs/commands/logout.html.markdown b/website/docs/cli/commands/logout.html.md similarity index 91% rename from website/docs/commands/logout.html.markdown rename to website/docs/cli/commands/logout.html.md index 644ff5171..15656fba9 100644 --- a/website/docs/commands/logout.html.markdown +++ b/website/docs/cli/commands/logout.html.md @@ -26,5 +26,5 @@ the remote server, so it will remain valid until manually revoked. By default, Terraform will remove the token stored in plain text in a local CLI configuration file called `credentials.tfrc.json`. If you have configured a -[credentials helper program](cli-config.html#credentials-helpers), Terraform +[credentials helper program](/docs/cli/config/config-file.html#credentials-helpers), Terraform will use the helper's `forget` command to remove it. diff --git a/website/docs/cli/commands/output.html.md b/website/docs/cli/commands/output.html.md new file mode 100644 index 000000000..1a828612d --- /dev/null +++ b/website/docs/cli/commands/output.html.md @@ -0,0 +1,123 @@ +--- +layout: "docs" +page_title: "Command: output" +sidebar_current: "docs-commands-output" +description: |- + The `terraform output` command is used to extract the value of an output variable from the state file. +--- + +# Command: output + +The `terraform output` command is used to extract the value of +an output variable from the state file. + +## Usage + +Usage: `terraform output [options] [NAME]` + +With no additional arguments, `output` will display all the outputs for +the root module. 
If an output `NAME` is specified, only the value of that +output is printed. + +The command-line flags are all optional. The list of available flags are: + +* `-json` - If specified, the outputs are formatted as a JSON object, with + a key per output. If `NAME` is specified, only the output specified will be + returned. This can be piped into tools such as `jq` for further processing. +* `-raw` - If specified, Terraform will convert the specified output value to a + string and print that string directly to the output, without any special + formatting. This can be convenient when working with shell scripts, but + it only supports string, number, and boolean values. Use `-json` instead + for processing complex data types. +* `-no-color` - If specified, output won't contain any color. +* `-state=path` - Path to the state file. Defaults to "terraform.tfstate". + Ignored when [remote state](/docs/language/state/remote.html) is used. + +## Examples + +These examples assume the following Terraform output snippet. 
+ +```hcl +output "instance_ips" { + value = aws_instance.web.*.public_ip +} + +output "lb_address" { + value = aws_alb.web.public_dns +} + +output "password" { + sensitive = true + value = var.secret_password +} +``` + +To list all outputs: + +```shellsession +$ terraform output +instance_ips = [ + "54.43.114.12", + "52.122.13.4", + "52.4.116.53" +] +lb_address = "my-app-alb-1657023003.us-east-1.elb.amazonaws.com" +password = +``` + +Note that outputs with the `sensitive` attribute will be redacted: + +```shellsession +$ terraform output password +password = +``` + +To query for the DNS address of the load balancer: + +```shellsession +$ terraform output lb_address +"my-app-alb-1657023003.us-east-1.elb.amazonaws.com" +``` + +To query for all instance IP addresses: + +```shellsession +$ terraform output instance_ips +instance_ips = [ + "54.43.114.12", + "52.122.13.4", + "52.4.116.53" +] +``` + +## Use in automation + +The `terraform output` command by default displays in a human-readable format, +which can change over time to improve clarity. + +For scripting and automation, use `-json` to produce the stable JSON format. +You can parse the output using a JSON command-line parser such as +[jq](https://stedolan.github.io/jq/): + +```shellsession +$ terraform output -json instance_ips | jq -r '.[0]' +54.43.114.12 +``` + +For the common case of directly using a string value in a shell script, you +can use `-raw` instead, which will print the string directly with no extra +escaping or whitespace. + +```shellsession +$ terraform output -raw lb_address +my-app-alb-1657023003.us-east-1.elb.amazonaws.com +``` + +The `-raw` option works only with values that Terraform can automatically +convert to strings. Use `-json` instead, possibly combined with `jq`, to +work with complex-typed values such as objects. + +Terraform strings are sequences of Unicode characters rather than raw bytes, +so the `-raw` output will be UTF-8 encoded when it contains non-ASCII +characters. 
If you need a different character encoding, use a separate command +such as `iconv` to transcode Terraform's raw output. diff --git a/website/docs/commands/plan.html.markdown b/website/docs/cli/commands/plan.html.md similarity index 90% rename from website/docs/commands/plan.html.markdown rename to website/docs/cli/commands/plan.html.md index c6a1703d4..647bedd34 100644 --- a/website/docs/commands/plan.html.markdown +++ b/website/docs/cli/commands/plan.html.md @@ -70,19 +70,19 @@ The available options are: * `-refresh=true` - Update the state prior to checking for differences. * `-state=path` - Path to the state file. Defaults to "terraform.tfstate". - Ignored when [remote state](/docs/state/remote.html) is used. + Ignored when [remote state](/docs/language/state/remote.html) is used. * `-target=resource` - A [Resource - Address](/docs/internals/resource-addressing.html) to target. This flag can + Address](/docs/cli/state/resource-addressing.html) to target. This flag can be used multiple times. See below for more information. -* `-var=foo=bar` - Set a variable in the Terraform configuration. This flag +* `-var 'foo=bar'` - Set a variable in the Terraform configuration. This flag can be set multiple times. Variable values are interpreted as - [HCL](/docs/configuration/syntax.html#HCL), so list and map values can be - specified via this flag. + [literal expressions](/docs/language/expressions/types.html) in the + Terraform language, so list and map values can be specified via this flag. * `-var-file=foo` - Set variables in the Terraform configuration from - a [variable file](/docs/configuration/variables.html#variable-files). If + a [variable file](/docs/language/values/variables.html#variable-definitions-tfvars-files). If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, they will be automatically loaded. `terraform.tfvars` is loaded first and the `.auto.tfvars` files after in alphabetical order. 
Any files @@ -93,7 +93,7 @@ The available options are: The `-target` option can be used to focus Terraform's attention on only a subset of resources. -[Resource Address](/docs/internals/resource-addressing.html) syntax is used +[Resource Address](/docs/cli/state/resource-addressing.html) syntax is used to specify the constraint. The resource address is interpreted as follows: * If the given address has a _resource spec_, only the specified resource @@ -115,7 +115,7 @@ of resources relates to configuration. Instead of using `-target` as a means to operate on isolated portions of very large configurations, prefer instead to break large configurations into several smaller configurations that can each be independently applied. -[Data sources](/docs/configuration/data-sources.html) can be used to access +[Data sources](/docs/language/data-sources/index.html) can be used to access information about resources created in other configurations, allowing a complex system architecture to be broken down into more manageable parts that can be updated independently. @@ -142,7 +142,7 @@ module instead of the current working directory. That usage is still supported in Terraform v0.14, but is now deprecated and we plan to remove it in Terraform v0.15. If your workflow relies on overriding the root module directory, use -[the `-chdir` global option](./#switching-working-directory-with--chdir) +[the `-chdir` global option](./#switching-working-directory-with-chdir) instead, which works across all commands and makes Terraform consistently look in the given directory for all files it would normaly read or write in the current working directory. @@ -150,6 +150,6 @@ current working directory. 
If your previous use of this legacy pattern was also relying on Terraform writing the `.terraform` subdirectory into the current working directory even though the root module directory was overridden, use -[the `TF_DATA_DIR` environment variable](environment-variables.html#TF_DATA_DIR) +[the `TF_DATA_DIR` environment variable](/docs/cli/config/environment-variables.html#tf_data_dir) to direct Terraform to write the `.terraform` directory to a location other than the current working directory. diff --git a/website/docs/commands/providers.html.markdown b/website/docs/cli/commands/providers.html.md similarity index 83% rename from website/docs/commands/providers.html.markdown rename to website/docs/cli/commands/providers.html.md index d30c21303..3c3bb7d0b 100644 --- a/website/docs/commands/providers.html.markdown +++ b/website/docs/cli/commands/providers.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-providers" +layout: "docs" page_title: "Command: providers" sidebar_current: "docs-commands-providers" description: |- @@ -10,7 +10,7 @@ description: |- # Command: providers The `terraform providers` command shows information about the -[provider requirements](/docs/configuration/provider-requirements.html) of the +[provider requirements](/docs/language/providers/requirements.html) of the configuration in the current working directory, as an aid to understanding where each requirement was detected from. 
diff --git a/website/docs/commands/providers/lock.html.md b/website/docs/cli/commands/providers/lock.html.md similarity index 84% rename from website/docs/commands/providers/lock.html.md rename to website/docs/cli/commands/providers/lock.html.md index 25dea21e1..b9a48fae7 100644 --- a/website/docs/commands/providers/lock.html.md +++ b/website/docs/cli/commands/providers/lock.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-providers" +layout: "docs" page_title: "Command: providers lock" sidebar_current: "docs-commands-providers-lock" description: |- @@ -7,35 +7,35 @@ description: |- to the dependency lock file without initializing the referenced providers. --- -# Command: terraform providers mirror +# Command: terraform providers lock The `terraform providers lock` consults upstream registries (by default) in order to write provider dependency information into -[the dependency lock file](/docs/configuration/dependency-lock.html). +[the dependency lock file](/docs/language/dependency-lock.html). The common way to update the dependency lock file is as a side-effect of normal provider installation during -[`terraform init`](../init.html), but there are several situations where that +[`terraform init`](/docs/cli/commands/init.html), but there are several situations where that automatic approach may not be sufficient: * If you are running Terraform in an environment that uses - [alternative provider installation methods](../cli-config.html#provider-installation), + [alternative provider installation methods](/docs/cli/config/config-file.html#provider-installation), such as filesystem or network mirrors, normal provider installation will not access the origin registry for a provider and therefore Terraform will not be able to populate all of the possible package checksums for the selected provider versions. 
- If you use `terraform lock` to write the official release checksums for a - provider into the dependency lock file then future `terraform init` runs - will verify the packages available in your selected mirror against the - official checksums previously recorded, giving additional certainty that - the mirror is serving the provider packages it is claiming to. + If you use `terraform lock` to write the official release checksums for a + provider into the dependency lock file then future `terraform init` runs + will verify the packages available in your selected mirror against the + official checksums previously recorded, giving additional certainty that + the mirror is serving the provider packages it is claiming to. * If your team runs Terraform across a number of different platforms (e.g. on both Windows and Linux) and the upstream registry for a provider is unable to provide signed checksums using the latest hashing scheme, subsequent runs of Terraform on other platforms may - [add additional checksums to the lock file](/docs/configuration/dependency-lock.html#new-provider-package-checksums). + [add additional checksums to the lock file](/docs/language/dependency-lock.html#new-provider-package-checksums). You can avoid that by pre-populating hashes for all of the platforms you intend to use, using the `terraform providers lock` command. @@ -49,7 +49,7 @@ With no additional command line arguments, `terraform providers lock` will analyze the configuration in the current working directory to find all of the providers it depends on, and it will fetch the necessary data about those providers from their origin registries and then update -[the dependency lock file](/docs/configuration/dependency-lock.html) to +[the dependency lock file](/docs/language/dependency-lock.html) to include a selected version for each provider and all of the package checksums that are covered by the provider developer's cryptographic signature. 
@@ -81,14 +81,14 @@ You can customize the default behavior using the following additional option: available for the given platform and will save enough package checksums in the lock file to support _at least_ the specified platforms. - Use this option multiple times to include checksums for multiple target - systems. + Use this option multiple times to include checksums for multiple target + systems. - Target platform names consist of an operating system and a CPU - architecture. For example, `linux_amd64` selects the Linux operating system - running on an AMD64 or x86_64 CPU. + Target platform names consist of an operating system and a CPU + architecture. For example, `linux_amd64` selects the Linux operating system + running on an AMD64 or x86_64 CPU. - There is more detail on this option in the following section. + There is more detail on this option in the following section. ## Specifying Target Platforms @@ -150,7 +150,7 @@ multiple times and specify a different subset of your providers each time. The `-fs-mirror` and `-net-mirror` options have the same meaning as `filesystem_mirror` and `network_mirror` blocks in -[the provider installation methods configuration](../cli-config.html#provider-installation), +[the provider installation methods configuration](/docs/cli/config/config-file.html#provider-installation), but specify only a single method in order to be explicit about where you intend to derive the package checksum information from. 
diff --git a/website/docs/commands/providers/mirror.html.md b/website/docs/cli/commands/providers/mirror.html.md similarity index 95% rename from website/docs/commands/providers/mirror.html.md rename to website/docs/cli/commands/providers/mirror.html.md index 39e7b183b..b0827837e 100644 --- a/website/docs/commands/providers/mirror.html.md +++ b/website/docs/cli/commands/providers/mirror.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-providers" +layout: "docs" page_title: "Command: providers mirror" sidebar_current: "docs-commands-providers-mirror" description: |- @@ -19,7 +19,7 @@ from provider registries as part of initializing the current working directory. Sometimes Terraform is running in an environment where that isn't possible, such as on an isolated network without access to the Terraform Registry. In that case, -[explicit installation method configuration](../cli-config.html#explicit-installation-method-configuration) +[explicit installation method configuration](/docs/cli/config/config-file.html#explicit-installation-method-configuration) allows you to configure Terraform, when running on a particular system, to consult only a local filesystem directory where you've created a local mirror of the necessary plugins, and to skip accessing the upstream registry at all. 
diff --git a/website/docs/commands/providers/schema.html.md b/website/docs/cli/commands/providers/schema.html.md similarity index 99% rename from website/docs/commands/providers/schema.html.md rename to website/docs/cli/commands/providers/schema.html.md index 717d463eb..e97e50f23 100644 --- a/website/docs/commands/providers/schema.html.md +++ b/website/docs/cli/commands/providers/schema.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-providers" +layout: "docs" page_title: "Command: providers schema" sidebar_current: "docs-commands-providers-schema" description: |- diff --git a/website/docs/commands/push.html.markdown b/website/docs/cli/commands/push.html.md similarity index 75% rename from website/docs/commands/push.html.markdown rename to website/docs/cli/commands/push.html.md index 927920b35..bc093a85c 100644 --- a/website/docs/commands/push.html.markdown +++ b/website/docs/cli/commands/push.html.md @@ -8,7 +8,7 @@ description: |- # Command: push -!> **Important:** The `terraform push` command is no longer functional. Its functionality was replaced and surpassed by [the `remote` backend](/docs/backends/types/remote.html), which works with current versions of Terraform Cloud. The `remote` backend allows you to run remote operations directly from the command line, and displays real-time output from the remote run environment. +!> **Important:** The `terraform push` command is no longer functional. Its functionality was replaced and surpassed by [the `remote` backend](/docs/language/settings/backends/remote.html), which works with current versions of Terraform Cloud. The `remote` backend allows you to run remote operations directly from the command line, and displays real-time output from the remote run environment. The `terraform push` command was an early implementation of remote Terraform runs. It allowed teams to push a configuration to a remote run environment in a discontinued version of Terraform Enterprise. 
diff --git a/website/docs/commands/refresh.html.markdown b/website/docs/cli/commands/refresh.html.md similarity index 85% rename from website/docs/commands/refresh.html.markdown rename to website/docs/cli/commands/refresh.html.md index 6506cc96a..517b76f00 100644 --- a/website/docs/commands/refresh.html.markdown +++ b/website/docs/cli/commands/refresh.html.md @@ -43,24 +43,24 @@ The `terraform refresh` command accepts the following options: to 10. * `-state=path` - Path to read and write the state file to. Defaults to "terraform.tfstate". - Ignored when [remote state](/docs/state/remote.html) is used. + Ignored when [remote state](/docs/language/state/remote.html) is used. * `-state-out=path` - Path to write updated state file. By default, the `-state` path will be used. Ignored when - [remote state](/docs/state/remote.html) is used. + [remote state](/docs/language/state/remote.html) is used. * `-target=resource` - A [Resource - Address](/docs/internals/resource-addressing.html) to target. Operation will + Address](/docs/cli/state/resource-addressing.html) to target. Operation will be limited to this resource and its dependencies. This flag can be used multiple times. * `-var 'foo=bar'` - Set a variable in the Terraform configuration. This flag can be set multiple times. Variable values are interpreted as - [HCL](/docs/configuration/syntax.html#HCL), so list and map values can be - specified via this flag. + [literal expressions](/docs/language/expressions/types.html) in the + Terraform language, so list and map values can be specified via this flag. * `-var-file=foo` - Set variables in the Terraform configuration from - a [variable file](/docs/configuration/variables.html#variable-files). If + a [variable file](/docs/language/values/variables.html#variable-definitions-tfvars-files). If a `terraform.tfvars` or any `.auto.tfvars` files are present in the current directory, they will be automatically loaded. 
`terraform.tfvars` is loaded first and the `.auto.tfvars` files after in alphabetical order. Any files diff --git a/website/docs/commands/show.html.markdown b/website/docs/cli/commands/show.html.md similarity index 96% rename from website/docs/commands/show.html.markdown rename to website/docs/cli/commands/show.html.md index bb0cd7c41..3c78d6d00 100644 --- a/website/docs/commands/show.html.markdown +++ b/website/docs/cli/commands/show.html.md @@ -18,7 +18,7 @@ flag. -> **Note:** When using the `-json` command-line flag, any sensitive values in Terraform state will be displayed in plain text. For more information, see -[Sensitive Data in State](/docs/state/sensitive-data.html). +[Sensitive Data in State](/docs/language/state/sensitive-data.html). ## JSON Output diff --git a/website/docs/commands/state/index.html.md b/website/docs/cli/commands/state/index.html.md similarity index 91% rename from website/docs/commands/state/index.html.md rename to website/docs/cli/commands/state/index.html.md index 4c2154caa..38e2028d9 100644 --- a/website/docs/commands/state/index.html.md +++ b/website/docs/cli/commands/state/index.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-state" +layout: "docs" page_title: "Command: state" sidebar_current: "docs-commands-state-index" description: |- @@ -10,7 +10,7 @@ description: |- The `terraform state` command is used for advanced state management. As your Terraform usage becomes more advanced, there are some cases where -you may need to modify the [Terraform state](/docs/state/index.html). +you may need to modify the [Terraform state](/docs/language/state/index.html). Rather than modify the state directly, the `terraform state` commands can be used in many cases instead. @@ -35,7 +35,7 @@ written to disk and the CLI usage is the same as if it were local state. All `terraform state` subcommands that modify the state write backup files. The path of these backup file can be controlled with `-backup`. 
-Subcommands that are read-only (such as [list](/docs/commands/state/list.html)) +Subcommands that are read-only (such as [list](/docs/cli/commands/state/list.html)) do not write any backup files since they aren't modifying the state. Note that backups for state modification _can not be disabled_. Due to diff --git a/website/docs/commands/state/list.html.md b/website/docs/cli/commands/state/list.html.md similarity index 84% rename from website/docs/commands/state/list.html.md rename to website/docs/cli/commands/state/list.html.md index dea39e2c1..a7b78c94b 100644 --- a/website/docs/commands/state/list.html.md +++ b/website/docs/cli/commands/state/list.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-state" +layout: "docs" page_title: "Command: state list" sidebar_current: "docs-commands-state-sub-list" description: |- @@ -9,7 +9,7 @@ description: |- # Command: state list The `terraform state list` command is used to list resources within a -[Terraform state](/docs/state/index.html). +[Terraform state](/docs/language/state/index.html). ## Usage @@ -25,12 +25,12 @@ within modules are listed last. For complex infrastructures, the state can contain thousands of resources. To filter these, provide one or more patterns to the command. Patterns are -in [resource addressing format](/docs/commands/state/addressing.html). +in [resource addressing format](/docs/cli/state/resource-addressing.html). The command-line flags are all optional. The list of available flags are: * `-state=path` - Path to the state file. Defaults to "terraform.tfstate". - Ignored when [remote state](/docs/state/remote.html) is used. + Ignored when [remote state](/docs/language/state/remote.html) is used. * `-id=id` - ID of resources to show. Ignored when unset. 
## Example: All Resources @@ -57,11 +57,12 @@ aws_instance.bar[1] ## Example: Filtering by Module -This example will only list resources in the given module: +This example will list resources in the given module and any submodules: ``` $ terraform state list module.elb module.elb.aws_elb.main +module.elb.module.secgroups.aws_security_group.sg ``` ## Example: Filtering by ID diff --git a/website/docs/commands/state/mv.html.md b/website/docs/cli/commands/state/mv.html.md similarity index 88% rename from website/docs/commands/state/mv.html.md rename to website/docs/cli/commands/state/mv.html.md index bb35b1399..66fcbb764 100644 --- a/website/docs/commands/state/mv.html.md +++ b/website/docs/cli/commands/state/mv.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-state" +layout: "docs" page_title: "Command: state mv" sidebar_current: "docs-commands-state-sub-mv" description: |- @@ -9,7 +9,7 @@ description: |- # Command: state mv The `terraform state mv` command is used to move items in a -[Terraform state](/docs/state/index.html). This command can move +[Terraform state](/docs/language/state/index.html). This command can move single resources, single instances of a resource, entire modules, and more. This command can also move items to a completely different state file, enabling efficient refactoring. @@ -36,7 +36,7 @@ for each state file. This command requires a source and destination address of the item to move. Addresses are -in [resource addressing format](/docs/commands/state/addressing.html). +in [resource addressing format](/docs/cli/state/resource-addressing.html). The command-line flags are all optional. The list of available flags are: @@ -57,6 +57,11 @@ The command-line flags are all optional. The list of available flags are: isn't specified the source state file will be used. This can be a new or existing path. 
+* `-ignore-remote-version` - When using the enhanced remote backend with + Terraform Cloud, continue even if remote and local Terraform versions differ. + This may result in an unusable Terraform Cloud workspace, and should be used + with extreme caution. + ## Example: Rename a Resource The example below renames the `packet_device` resource named `worker` to `helper`: @@ -95,7 +100,7 @@ $ terraform state mv -state-out=other.tfstate 'module.app' 'module.app' ## Example: Move a Resource configured with count The example below moves the first instance of a `packet_device` resource named `worker` configured with -[`count`](/docs/configuration/resources.html#count-multiple-resource-instances-by-count) to +[`count`](/docs/language/meta-arguments/count.html) to the first instance of a resource named `helper` also configured with `count`: ```shell @@ -105,7 +110,7 @@ $ terraform state mv 'packet_device.worker[0]' 'packet_device.helper[0]' ## Example: Move a Resource configured with for_each The example below moves the `"example123"` instance of a `packet_device` resource named `worker` configured with -[`for_each`](/docs/configuration/resources.html#for_each-multiple-resource-instances-defined-by-a-map-or-set-of-strings) +[`for_each`](/docs/language/meta-arguments/for_each.html) to the `"example456"` instance of a resource named `helper` also configuring `for_each`: Linux, Mac OS, and UNIX: diff --git a/website/docs/commands/state/pull.html.md b/website/docs/cli/commands/state/pull.html.md similarity index 57% rename from website/docs/commands/state/pull.html.md rename to website/docs/cli/commands/state/pull.html.md index 394881b45..c61177b0a 100644 --- a/website/docs/commands/state/pull.html.md +++ b/website/docs/cli/commands/state/pull.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-state" +layout: "docs" page_title: "Command: state pull" sidebar_current: "docs-commands-state-sub-pull" description: |- @@ -9,16 +9,21 @@ description: |- # Command: state pull The `terraform 
state pull` command is used to manually download and output -the state from [remote state](/docs/state/remote.html). This command also +the state from [remote state](/docs/language/state/remote.html). This command also works with local state. ## Usage Usage: `terraform state pull` -This command will download the state from its current location and -output the raw format to stdout. +This command will download the state from its current location, upgrade the +local copy to the latest state file version, and output the raw format to +stdout. This is useful for reading values out of state (potentially pairing this command with something like [jq](https://stedolan.github.io/jq/)). It is also useful if you need to make manual modifications to state. + +~> Note: This command cannot be used to inspect the Terraform version of +the remote state, as it will always be converted to the current Terraform +version before output. diff --git a/website/docs/commands/state/push.html.md b/website/docs/cli/commands/state/push.html.md similarity index 77% rename from website/docs/commands/state/push.html.md rename to website/docs/cli/commands/state/push.html.md index 13303ad2e..1481e6bfe 100644 --- a/website/docs/commands/state/push.html.md +++ b/website/docs/cli/commands/state/push.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-state" +layout: "docs" page_title: "Command: state push" sidebar_current: "docs-commands-state-sub-push" description: |- @@ -9,7 +9,7 @@ description: |- # Command: state push The `terraform state push` command is used to manually upload a local -state file to [remote state](/docs/state/remote.html). This command also +state file to [remote state](/docs/language/state/remote.html). This command also works with local state. This command should rarely be used. It is meant only as a utility in case @@ -20,7 +20,7 @@ manual intervention is necessary with the remote state. 
Usage: `terraform state push [options] PATH` This command will push the state specified by PATH to the currently -configured [backend](/docs/backends). +configured [backend](/docs/language/settings/backends/index.html). If PATH is "-" then the state data to push is read from stdin. This data is loaded completely into memory and verified prior to being written to @@ -42,3 +42,10 @@ making changes that appear to be unsafe: Both of these safety checks can be disabled with the `-force` flag. **This is not recommended.** If you disable the safety checks and are pushing state, the destination state will be overwritten. + +Other available flags: + +* `-ignore-remote-version` - When using the enhanced remote backend with + Terraform Cloud, continue even if remote and local Terraform versions differ. + This may result in an unusable Terraform Cloud workspace, and should be used + with extreme caution. diff --git a/website/docs/commands/state/replace-provider.html.md b/website/docs/cli/commands/state/replace-provider.html.md similarity index 82% rename from website/docs/commands/state/replace-provider.html.md rename to website/docs/cli/commands/state/replace-provider.html.md index 1219c12a2..e98373729 100644 --- a/website/docs/commands/state/replace-provider.html.md +++ b/website/docs/cli/commands/state/replace-provider.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-state" +layout: "docs" page_title: "Command: state replace-provider" sidebar_current: "docs-commands-state-sub-replace-provider" description: |- @@ -9,7 +9,7 @@ description: |- # Command: state replace-provider The `terraform state replace-provider` command is used to replace the provider -for resources in a [Terraform state](/docs/state/index.html). +for resources in a [Terraform state](/docs/language/state/index.html). ## Usage @@ -38,6 +38,11 @@ The command-line flags are all optional. The list of available flags are: * `-state=path` - Path to the source state file to read from. 
Defaults to the configured backend, or "terraform.tfstate". +* `-ignore-remote-version` - When using the enhanced remote backend with + Terraform Cloud, continue even if remote and local Terraform versions differ. + This may result in an unusable Terraform Cloud workspace, and should be used + with extreme caution. + ## Example The example below replaces the `hashicorp/aws` provider with a fork by `acme`, hosted at a private registry at `registry.acme.corp`: diff --git a/website/docs/commands/state/rm.html.md b/website/docs/cli/commands/state/rm.html.md similarity index 85% rename from website/docs/commands/state/rm.html.md rename to website/docs/cli/commands/state/rm.html.md index cbdfc766e..556a99d58 100644 --- a/website/docs/commands/state/rm.html.md +++ b/website/docs/cli/commands/state/rm.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-state" +layout: "docs" page_title: "Command: state rm" sidebar_current: "docs-commands-state-sub-rm" description: |- @@ -9,7 +9,7 @@ description: |- # Command: state rm The `terraform state rm` command is used to remove items from the -[Terraform state](/docs/state/index.html). This command can remove +[Terraform state](/docs/language/state/index.html). This command can remove single resources, single instances of a resource, entire modules, and more. @@ -39,7 +39,7 @@ of this command, backups are required. This command requires one or more addresses that point to a resources in the state. Addresses are -in [resource addressing format](/docs/commands/state/addressing.html). +in [resource addressing format](/docs/cli/state/resource-addressing.html). The command-line flags are all optional. The list of available flags are: @@ -51,6 +51,11 @@ The command-line flags are all optional. The list of available flags are: Terraform-managed resources. By default it will use the configured backend, or the default "terraform.tfstate" if it exists. 
+* `-ignore-remote-version` - When using the enhanced remote backend with + Terraform Cloud, continue even if remote and local Terraform versions differ. + This may result in an unusable Terraform Cloud workspace, and should be used + with extreme caution. + ## Example: Remove a Resource The example below removes the `packet_device` resource named `worker`: @@ -78,7 +83,7 @@ $ terraform state rm 'module.foo.packet_device.worker' ## Example: Remove a Resource configured with count The example below removes the first instance of a `packet_device` resource named `worker` configured with -[`count`](/docs/configuration/resources.html#count-multiple-resource-instances-by-count): +[`count`](/docs/language/meta-arguments/count.html): ```shell $ terraform state rm 'packet_device.worker[0]' @@ -87,7 +92,7 @@ $ terraform state rm 'packet_device.worker[0]' ## Example: Remove a Resource configured with for_each The example below removes the `"example"` instance of a `packet_device` resource named `worker` configured with -[`for_each`](/docs/configuration/resources.html#for_each-multiple-resource-instances-defined-by-a-map-or-set-of-strings): +[`for_each`](/docs/language/meta-arguments/for_each.html): Linux, Mac OS, and UNIX: diff --git a/website/docs/commands/state/show.html.md b/website/docs/cli/commands/state/show.html.md similarity index 82% rename from website/docs/commands/state/show.html.md rename to website/docs/cli/commands/state/show.html.md index 6cf8984a8..2c665ba64 100644 --- a/website/docs/commands/state/show.html.md +++ b/website/docs/cli/commands/state/show.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-state" +layout: "docs" page_title: "Command: state show" sidebar_current: "docs-commands-state-sub-show" description: |- @@ -10,7 +10,7 @@ description: |- The `terraform state show` command is used to show the attributes of a single resource in the -[Terraform state](/docs/state/index.html). +[Terraform state](/docs/language/state/index.html). 
## Usage @@ -21,16 +21,16 @@ state file that matches the given address. This command requires an address that points to a single resource in the state. Addresses are -in [resource addressing format](/docs/commands/state/addressing.html). +in [resource addressing format](/docs/cli/state/resource-addressing.html). The command-line flags are all optional. The list of available flags are: * `-state=path` - Path to the state file. Defaults to "terraform.tfstate". - Ignored when [remote state](/docs/state/remote.html) is used. + Ignored when [remote state](/docs/language/state/remote.html) is used. The output of `terraform state show` is intended for human consumption, not programmatic consumption. To extract state data for use in other software, use -[`terraform show -json`](../show.html#json-output) and decode the result +[`terraform show -json`](/docs/cli/commands/show.html#json-output) and decode the result using the documented structure. ## Example: Show a Resource @@ -61,7 +61,7 @@ $ terraform state show 'module.foo.packet_device.worker' ## Example: Show a Resource configured with count The example below shows the first instance of a `packet_device` resource named `worker` configured with -[`count`](/docs/configuration/resources.html#count-multiple-resource-instances-by-count): +[`count`](/docs/language/meta-arguments/count.html): ```shell $ terraform state show 'packet_device.worker[0]' @@ -70,7 +70,7 @@ $ terraform state show 'packet_device.worker[0]' ## Example: Show a Resource configured with for_each The example below shows the `"example"` instance of a `packet_device` resource named `worker` configured with -[`for_each`](/docs/configuration/resources.html#for_each-multiple-resource-instances-defined-by-a-map-or-set-of-strings): +[`for_each`](/docs/language/meta-arguments/for_each.html): Linux, Mac OS, and UNIX: diff --git a/website/docs/commands/taint.html.markdown b/website/docs/cli/commands/taint.html.md similarity index 84% rename from 
website/docs/commands/taint.html.markdown rename to website/docs/cli/commands/taint.html.md index 65c758d9c..9ef2982fa 100644 --- a/website/docs/commands/taint.html.markdown +++ b/website/docs/cli/commands/taint.html.md @@ -14,9 +14,9 @@ as tainted, forcing it to be destroyed and recreated on the next apply. This command _will not_ modify infrastructure, but does modify the state file in order to mark a resource as tainted. Once a resource is marked as tainted, the next -[plan](/docs/commands/plan.html) will show that the resource will +[plan](/docs/cli/commands/plan.html) will show that the resource will be destroyed and recreated and the next -[apply](/docs/commands/apply.html) will implement this change. +[apply](/docs/cli/commands/apply.html) will implement this change. Forcing the recreation of a resource is useful when you want a certain side effect of recreation that is not visible in the attributes of a resource. @@ -28,7 +28,7 @@ Note that tainting a resource for recreation may affect resources that depend on the newly tainted resource. For example, a DNS resource that uses the IP address of a server may need to be modified to reflect the potentially new IP address of a tainted server. The -[plan command](/docs/commands/plan.html) will show this if this is +[plan command](/docs/cli/commands/plan.html) will show this if this is the case. ## Usage @@ -37,7 +37,7 @@ Usage: `terraform taint [options] address` The `address` argument is the address of the resource to mark as tainted. The address is in -[the resource address syntax](/docs/internals/resource-addressing.html) syntax, +[the resource address syntax](/docs/cli/state/resource-addressing.html) syntax, as shown in the output from other commands, such as: * `aws_instance.foo` @@ -59,11 +59,16 @@ The command-line flags are all optional. The list of available flags are: * `-lock-timeout=0s` - Duration to retry a state lock. * `-state=path` - Path to read and write the state file to. 
Defaults to "terraform.tfstate". - Ignored when [remote state](/docs/state/remote.html) is used. + Ignored when [remote state](/docs/language/state/remote.html) is used. * `-state-out=path` - Path to write updated state file. By default, the `-state` path will be used. Ignored when - [remote state](/docs/state/remote.html) is used. + [remote state](/docs/language/state/remote.html) is used. + +* `-ignore-remote-version` - When using the enhanced remote backend with + Terraform Cloud, continue even if remote and local Terraform versions differ. + This may result in an unusable Terraform Cloud workspace, and should be used + with extreme caution. ## Example: Tainting a Single Resource @@ -99,7 +104,7 @@ Resource instance module.couchbase.aws_instance.cb_node[9] has been marked as ta ``` Although we recommend that most configurations use only one level of nesting -and employ [module composition](/docs/modules/composition.html), it's possible +and employ [module composition](/docs/language/modules/develop/composition.html), it's possible to have multiple levels of nested modules. In that case the resource instance address must include all of the steps to the target instance, as in the following example: diff --git a/website/docs/commands/untaint.html.markdown b/website/docs/cli/commands/untaint.html.md similarity index 83% rename from website/docs/commands/untaint.html.markdown rename to website/docs/cli/commands/untaint.html.md index c0bf081a6..69b414982 100644 --- a/website/docs/commands/untaint.html.markdown +++ b/website/docs/cli/commands/untaint.html.md @@ -43,17 +43,16 @@ certain cases, see above note). The list of available flags are: * `-lock-timeout=0s` - Duration to retry a state lock. -* `-module=path` - The module path where the resource to untaint exists. - By default this is the root path. Other modules can be specified by - a period-separated list. 
Example: "foo" would reference the module - "foo" but "foo.bar" would reference the "bar" module in the "foo" - module. - * `-no-color` - Disables output with coloring * `-state=path` - Path to read and write the state file to. Defaults to "terraform.tfstate". - Ignored when [remote state](/docs/state/remote.html) is used. + Ignored when [remote state](/docs/language/state/remote.html) is used. * `-state-out=path` - Path to write updated state file. By default, the `-state` path will be used. Ignored when - [remote state](/docs/state/remote.html) is used. + [remote state](/docs/language/state/remote.html) is used. + +* `-ignore-remote-version` - When using the enhanced remote backend with + Terraform Cloud, continue even if remote and local Terraform versions differ. + This may result in an unusable Terraform Cloud workspace, and should be used + with extreme caution. diff --git a/website/docs/commands/validate.html.markdown b/website/docs/cli/commands/validate.html.md similarity index 100% rename from website/docs/commands/validate.html.markdown rename to website/docs/cli/commands/validate.html.md diff --git a/website/docs/cli/commands/version.html.md b/website/docs/cli/commands/version.html.md new file mode 100644 index 000000000..713bfb6e3 --- /dev/null +++ b/website/docs/cli/commands/version.html.md @@ -0,0 +1,55 @@ +--- +layout: "docs" +page_title: "Command: version" +sidebar_current: "docs-commands-version" +description: |- + The `terraform version` command displays the version of Terraform and all installed plugins. +--- + +# Command: version + +The `terraform version` displays the current version of Terraform and all +installed plugins. + +## Usage + +Usage: `terraform version [options]` + +With no additional arguments, `version` will display the version of Terraform, +the platform it's installed on, installed providers, and the results of upgrade +and security checks [unless disabled](/docs/cli/commands/index.html#upgrade-and-security-bulletin-checks). 
+ +This command has one optional flag: + +* `-json` - If specified, the version information is formatted as a JSON object, + and no upgrade or security information is included. + +-> **Note:** Platform information was added to the `version` command in Terraform 0.15. + +## Example + +Basic usage, with upgrade and security information shown if relevant: + +```shellsession +$ terraform version +Terraform v0.15.0 +on darwin_amd64 ++ provider registry.terraform.io/hashicorp/null v3.0.0 + +Your version of Terraform is out of date! The latest version +is X.Y.Z. You can update by downloading from https://www.terraform.io/downloads.html +``` + +As JSON: + +```shellsession +$ terraform version -json +{ + "terraform_version": "0.15.0", + "platform": "darwin_amd64", + "provider_selections": { + "registry.terraform.io/hashicorp/null": "3.0.0" + }, + "terraform_outdated": true +} +``` \ No newline at end of file diff --git a/website/docs/commands/workspace/delete.html.md b/website/docs/cli/commands/workspace/delete.html.md similarity index 85% rename from website/docs/commands/workspace/delete.html.md rename to website/docs/cli/commands/workspace/delete.html.md index f489009c1..3c99cf857 100644 --- a/website/docs/commands/workspace/delete.html.md +++ b/website/docs/cli/commands/workspace/delete.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-workspace" +layout: "docs" page_title: "Command: workspace delete" sidebar_current: "docs-commands-workspace-sub-delete" description: |- @@ -12,7 +12,7 @@ The `terraform workspace delete` command is used to delete an existing workspace ## Usage -Usage: `terraform workspace delete [NAME]` +Usage: `terraform workspace delete [OPTIONS] NAME [DIR]` This command will delete the specified workspace. @@ -30,6 +30,8 @@ from getting into this situation. The command-line flags are all optional. The only supported flag is: * `-force` - Delete the workspace even if its state is not empty. Defaults to false. 
+* `-lock` - Lock the state file when locking is supported. Defaults to true. +* `-lock-timeout` - Duration to retry a state lock. Default 0s. ## Example diff --git a/website/docs/commands/workspace/index.html.md b/website/docs/cli/commands/workspace/index.html.md similarity index 87% rename from website/docs/commands/workspace/index.html.md rename to website/docs/cli/commands/workspace/index.html.md index 6ec535d63..cf60ded67 100644 --- a/website/docs/commands/workspace/index.html.md +++ b/website/docs/cli/commands/workspace/index.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-workspace" +layout: "docs" page_title: "Command: workspace" sidebar_current: "docs-commands-workspace-index" description: |- @@ -9,7 +9,7 @@ description: |- # Command: workspace The `terraform workspace` command is used to manage -[workspaces](/docs/state/workspaces.html). +[workspaces](/docs/language/state/workspaces.html). This command is a container for further subcommands. These subcommands are listed in the navigation bar. diff --git a/website/docs/commands/workspace/list.html.md b/website/docs/cli/commands/workspace/list.html.md similarity index 89% rename from website/docs/commands/workspace/list.html.md rename to website/docs/cli/commands/workspace/list.html.md index a44cab8af..2114a60c6 100644 --- a/website/docs/commands/workspace/list.html.md +++ b/website/docs/cli/commands/workspace/list.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-workspace" +layout: "docs" page_title: "Command: workspace list" sidebar_current: "docs-commands-workspace-sub-list" description: |- @@ -12,7 +12,7 @@ The `terraform workspace list` command is used to list all existing workspaces. ## Usage -Usage: `terraform workspace list` +Usage: `terraform workspace list [DIR]` The command will list all existing workspaces. The current workspace is indicated using an asterisk (`*`) marker. 
diff --git a/website/docs/commands/workspace/new.html.md b/website/docs/cli/commands/workspace/new.html.md similarity index 75% rename from website/docs/commands/workspace/new.html.md rename to website/docs/cli/commands/workspace/new.html.md index d41473440..0caac8b08 100644 --- a/website/docs/commands/workspace/new.html.md +++ b/website/docs/cli/commands/workspace/new.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-workspace" +layout: "docs" page_title: "Command: workspace new" sidebar_current: "docs-commands-workspace-sub-new" description: |- @@ -12,7 +12,7 @@ The `terraform workspace new` command is used to create a new workspace. ## Usage -Usage: `terraform workspace new [NAME]` +Usage: `terraform workspace new [OPTIONS] NAME [DIR]` This command will create a new workspace with the given name. A workspace with this name must not already exist. @@ -20,9 +20,11 @@ this name must not already exist. If the `-state` flag is given, the state specified by the given path will be copied to initialize the state for this new workspace. -The command-line flags are all optional. The only supported flag is: +The command-line flags are all optional. The supported flags are: -* `-state=path` - Path to a state file to initialize the state of this environment. +* `-lock` - Lock the state file when locking is supported. Defaults to true. +* `-lock-timeout` - Duration to retry a state lock. Default 0s. +* `-state=path` - Path to an existing state file to initialize the state of this environment. 
## Example: Create diff --git a/website/docs/commands/workspace/select.html.md b/website/docs/cli/commands/workspace/select.html.md similarity index 89% rename from website/docs/commands/workspace/select.html.md rename to website/docs/cli/commands/workspace/select.html.md index 197eb64d7..08170a61e 100644 --- a/website/docs/commands/workspace/select.html.md +++ b/website/docs/cli/commands/workspace/select.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-workspace" +layout: "docs" page_title: "Command: workspace select" sidebar_current: "docs-commands-workspace-sub-select" description: |- @@ -13,7 +13,7 @@ workspace to use for further operations. ## Usage -Usage: `terraform workspace select [NAME]` +Usage: `terraform workspace select NAME [DIR]` This command will select another workspace. The named workspace must already exist. diff --git a/website/docs/commands/workspace/show.html.md b/website/docs/cli/commands/workspace/show.html.md similarity index 94% rename from website/docs/commands/workspace/show.html.md rename to website/docs/cli/commands/workspace/show.html.md index 228bc13c0..7cd7d5b5d 100644 --- a/website/docs/commands/workspace/show.html.md +++ b/website/docs/cli/commands/workspace/show.html.md @@ -1,5 +1,5 @@ --- -layout: "commands-workspace" +layout: "docs" page_title: "Command: workspace show" sidebar_current: "docs-commands-workspace-sub-show" description: |- diff --git a/website/docs/commands/cli-config.html.markdown b/website/docs/cli/config/config-file.html.md similarity index 97% rename from website/docs/commands/cli-config.html.markdown rename to website/docs/cli/config/config-file.html.md index bd83d7e9a..97ba3a7d6 100644 --- a/website/docs/commands/cli-config.html.markdown +++ b/website/docs/cli/config/config-file.html.md @@ -11,14 +11,14 @@ description: |- The CLI configuration file configures per-user settings for CLI behaviors, which apply across all Terraform working directories. 
This is separate from -[your infrastructure configuration](/docs/configuration/index.html). +[your infrastructure configuration](/docs/language/index.html). ## Location The configuration is placed in a single file whose location depends on the host operating system: -* On Windows, the file must be named named `terraform.rc` and placed +* On Windows, the file must be named `terraform.rc` and placed in the relevant user's `%APPDATA%` directory. The physical location of this directory depends on your Windows version and system configuration; use `$env:APPDATA` in PowerShell to find its location on your system. @@ -33,7 +33,7 @@ as just `terraform.rc`. Use `dir` from PowerShell or Command Prompt to confirm the filename. The location of the Terraform CLI configuration file can also be specified -using the `TF_CLI_CONFIG_FILE` [environment variable](/docs/commands/environment-variables.html). +using the `TF_CLI_CONFIG_FILE` [environment variable](/docs/cli/config/environment-variables.html). ## Configuration File Syntax @@ -60,7 +60,7 @@ The following settings can be set in the CLI configuration file: See [Credentials Helpers](#credentials-helpers) below for more information. - `disable_checkpoint` — when set to `true`, disables - [upgrade and security bulletin checks](/docs/commands/index.html#upgrade-and-security-bulletin-checks) + [upgrade and security bulletin checks](/docs/cli/commands/index.html#upgrade-and-security-bulletin-checks) that require reaching out to HashiCorp-provided network services. 
- `disable_checkpoint_signature` — when set to `true`, allows the upgrade and @@ -93,7 +93,7 @@ credentials "app.terraform.io" { } ``` -If you are running the Terraform CLI interactively on a computer with a web browser, you can use [the `terraform login` command](./login.html) +If you are running the Terraform CLI interactively on a computer with a web browser, you can use [the `terraform login` command](/docs/cli/commands/login.html) to get credentials and automatically save them in the CLI configuration. If not, you can manually write `credentials` blocks. @@ -193,7 +193,7 @@ whose origin registry is at `example.com` can be installed only from the filesystem mirror at `/usr/share/terraform/providers`, while all other providers can be installed only directly from their origin registries. -If you set both both `include` and `exclude` for a particular installation +If you set both `include` and `exclude` for a particular installation method, the exclusion patterns take priority. For example, including `registry.terraform.io/hashicorp/*` but also excluding `registry.terraform.io/hashicorp/dns` will make that installation method apply @@ -394,7 +394,7 @@ provider_installation { With development overrides in effect, the `terraform init` command will still attempt to select a suitable published version of your provider to install and record in -[the dependency lock file](/docs/configuration/dependency-lock.html) +[the dependency lock file](/docs/language/dependency-lock.html) for future use, but other commands like `terraform apply` will disregard the lock file's entry for `hashicorp/null` and will use the given directory instead. Once your new changes are included in a @@ -412,7 +412,7 @@ files into the override directory too. You may wish to enable a development override only for shell sessions where you are actively working on provider development. 
If so, you can write a local CLI configuration file with content like the above in your development -directory, perhaps called `dev.tfrc` for the sake fo example, and then use the +directory, perhaps called `dev.tfrc` for the sake of example, and then use the `TF_CLI_CONFIG_FILE` environment variable to instruct Terraform to use that localized CLI configuration instead of the default one: diff --git a/website/docs/commands/environment-variables.html.md b/website/docs/cli/config/environment-variables.html.md similarity index 94% rename from website/docs/commands/environment-variables.html.md rename to website/docs/cli/config/environment-variables.html.md index ed003332e..891f8d231 100644 --- a/website/docs/commands/environment-variables.html.md +++ b/website/docs/cli/config/environment-variables.html.md @@ -59,7 +59,7 @@ export TF_VAR_alist='[1,2,3]' export TF_VAR_amap='{ foo = "bar", baz = "qux" }' ``` -For more on how to use `TF_VAR_name` in context, check out the section on [Variable Configuration](/docs/configuration/variables.html). +For more on how to use `TF_VAR_name` in context, check out the section on [Variable Configuration](/docs/language/values/variables.html). ## TF_CLI_ARGS and TF_CLI_ARGS_name @@ -114,7 +114,7 @@ export TF_WORKSPACE=your_workspace Using this environment variable is recommended only for non-interactive usage, since in a local shell environment it can be easy to forget the variable is set and apply changes to the wrong state. For more information regarding workspaces, check out the section on [Using Workspaces] -(https://www.terraform.io/docs/state/workspaces.html). +(https://www.terraform.io/docs/language/state/workspaces.html). ## TF_IN_AUTOMATION @@ -145,7 +145,7 @@ export TF_REGISTRY_CLIENT_TIMEOUT=15 ## TF_CLI_CONFIG_FILE -The location of the [Terraform CLI configuration file](/docs/commands/cli-config.html). +The location of the [Terraform CLI configuration file](/docs/cli/config/config-file.html). 
```shell export TF_CLI_CONFIG_FILE="$HOME/.terraformrc-custom" @@ -159,4 +159,4 @@ If `TF_IGNORE` is set to "trace", Terraform will output debug messages to displa export TF_IGNORE=trace ``` -For more details on `.terraformignore`, please see [Excluding Files from Upload with .terraformignore](/docs/backends/types/remote.html#excluding-files-from-upload-with-terraformignore). +For more details on `.terraformignore`, please see [Excluding Files from Upload with .terraformignore](/docs/language/settings/backends/remote.html#excluding-files-from-upload-with-terraformignore). diff --git a/website/docs/cli/config/index.html.md b/website/docs/cli/config/index.html.md new file mode 100644 index 000000000..dbac33b53 --- /dev/null +++ b/website/docs/cli/config/index.html.md @@ -0,0 +1,22 @@ +--- +layout: "docs" +page_title: "CLI Configuration - Terraform CLI" +--- + +# CLI Configuration + +Terraform CLI can be configured with some global settings, which are separate +from any Terraform configuration and which apply across all working directories. + +We've designed Terraform such that an average user running Terraform CLI +interactively will not need to interact with any of these settings. As a result, +most of the global settings relate to advanced or automated workflows, or +unusual environmental conditions like running Terraform on an airgapped +instance. + +- The [CLI config file](/docs/cli/config/config-file.html) configures provider + installation and security features. +- Several [environment variables](/docs/cli/config/environment-variables.html) can + configure Terraform's inputs and outputs; this includes some alternate ways to + provide information that is usually passed on the command line or read from + the state of the shell. 
diff --git a/website/docs/import/importability.html.md b/website/docs/cli/import/importability.html.md similarity index 88% rename from website/docs/import/importability.html.md rename to website/docs/cli/import/importability.html.md index dde7cb682..79d78d90a 100644 --- a/website/docs/import/importability.html.md +++ b/website/docs/cli/import/importability.html.md @@ -20,5 +20,5 @@ Converting a resource to be importable is also relatively simple, so if you're interested in contributing that functionality, the Terraform team would be grateful. -To make a resource importable, please see the -[plugin documentation on writing a resource](/docs/plugins/provider.html). +To make a resource importable, please see +[Extending Terraform: Resources — Import](/docs/extend/resources/import.html). diff --git a/website/docs/import/index.html.md b/website/docs/cli/import/index.html.md similarity index 90% rename from website/docs/import/index.html.md rename to website/docs/cli/import/index.html.md index 4be535bba..10eb64fb6 100644 --- a/website/docs/import/index.html.md +++ b/website/docs/cli/import/index.html.md @@ -26,12 +26,12 @@ itself having created all objects. If you import existing objects into Terraform be careful to import each remote object to only one Terraform resource address. If you import the same object multiple times, Terraform may exhibit unwanted behavior. For more information on this assumption, see -[the State section](/docs/state/). +[the State section](/docs/language/state/index.html). ## Currently State Only The current implementation of Terraform import can only import resources -into the [state](/docs/state). It does not generate configuration. A future +into the [state](/docs/language/state/index.html). It does not generate configuration. A future version of Terraform will also generate configuration. Because of this, prior to running `terraform import` it is necessary to write @@ -44,7 +44,7 @@ importing existing resources. 
## Remote Backends When using Terraform import on the command line with a [remote -backend](/docs/backends/types/remote.html), such as Terraform Cloud, the import +backend](/docs/language/settings/backends/remote.html), such as Terraform Cloud, the import command runs locally, unlike commands such as apply, which run inside your Terraform Cloud environment. Because of this, the import command will not have access to information from the remote backend, such as workspace variables. diff --git a/website/docs/import/usage.html.md b/website/docs/cli/import/usage.html.md similarity index 95% rename from website/docs/import/usage.html.md rename to website/docs/cli/import/usage.html.md index 57fdb4d76..4fdc5452d 100644 --- a/website/docs/import/usage.html.md +++ b/website/docs/cli/import/usage.html.md @@ -23,7 +23,7 @@ itself having created all objects. If you import existing objects into Terraform be careful to import each remote object to only one Terraform resource address. If you import the same object multiple times, Terraform may exhibit unwanted behavior. For more information on this assumption, see -[the State section](/docs/state/). +[the State section](/docs/language/state/index.html). To import a resource, first write a resource block for it in your configuration, establishing the name by which it will be known to Terraform: @@ -57,7 +57,7 @@ Terraform state. It is also possible to import to resources in child modules, using their paths, and to single instances of a resource with `count` or `for_each` set. See -[_Resource Addressing_](/docs/internals/resource-addressing.html) for more +[_Resource Addressing_](/docs/cli/state/resource-addressing.html) for more details on how to specify a target resource. The syntax of the given ID is dependent on the resource type being imported. @@ -83,4 +83,4 @@ a `resource` block in configuration for each secondary resource. If this is not done, Terraform will plan to destroy the imported objects on the next run. 
If you want to rename or otherwise move the imported resources, the -[state management commands](/docs/commands/state/index.html) can be used. +[state management commands](/docs/cli/commands/state/index.html) can be used. diff --git a/website/docs/cli/index.html.md b/website/docs/cli/index.html.md new file mode 100644 index 000000000..6d66974e5 --- /dev/null +++ b/website/docs/cli/index.html.md @@ -0,0 +1,20 @@ +--- +layout: "docs" +page_title: "Terraform CLI Documentation" +sidebar_current: "docs-home" +description: |- + Documentation for Terraform's CLI-based workflows. +--- + +# Terraform CLI Documentation + +> **Hands-on:** Try the [Terraform: Get Started](https://learn.hashicorp.com/collections/terraform/aws-get-started?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn. + +This is the documentation for Terraform CLI. It is relevant to anyone working +with Terraform's CLI-based workflows; this includes people who use Terraform CLI +by itself, as well as those who use Terraform CLI in conjunction with Terraform +Cloud or Terraform Enterprise. + +Notably, this documentation does not cover the syntax and usage of the Terraform +language. For that, see the +[Terraform Language Documentation](/docs/language/index.html). diff --git a/website/docs/cli/init/index.html.md b/website/docs/cli/init/index.html.md new file mode 100644 index 000000000..ee6372b0f --- /dev/null +++ b/website/docs/cli/init/index.html.md @@ -0,0 +1,71 @@ +--- +layout: "docs" +page_title: "Initializing Working Directories - Terraform CLI" +--- + +# Initializing Working Directories + +Terraform expects to be invoked from a working directory that contains +configuration files written in +[the Terraform language](/docs/language/index.html). Terraform uses +configuration content from this directory, and also uses the directory to store +settings, cached plugins and modules, and sometimes state data. 
+ +A working directory must be initialized before Terraform can perform any +operations in it (like provisioning infrastructure or modifying state). + +## Working Directory Contents + +A Terraform working directory typically contains: + +- A Terraform configuration describing resources Terraform should manage. This + configuration is expected to change over time. +- A hidden `.terraform` directory, which Terraform uses to manage cached + provider plugins and modules, record which + [workspace](/docs/cli/workspaces/index.html) is currently active, and + record the last known backend configuration in case it needs to migrate state + on the next run. This directory is automatically managed by Terraform, and is + created during initialization. +- State data, if the configuration uses the default `local` backend. This is + managed by Terraform in a `terraform.tfstate` file (if the directory only uses + the default workspace) or a `terraform.tfstate.d` directory (if the directory + uses multiple workspaces). + +## Initialization + +Run the `terraform init` command to initialize a working directory that contains +a Terraform configuration. After initialization, you will be able to perform +other commands, like `terraform plan` and `terraform apply`. + +If you try to run a command that relies on initialization without first +initializing, the command will fail with an error and explain that you need to +run init. + +Initialization performs several tasks to prepare a directory, including +accessing state in the configured backend, downloading and installing provider +plugins, and downloading modules. Under some conditions (usually when changing +from one backend to another), it might ask the user for guidance or +confirmation. + +For details, see [the `terraform init` command](/docs/cli/commands/init.html). + +## Reinitialization + +Certain types of changes to a Terraform configuration can require +reinitialization before normal operations can continue. 
This includes changes to +provider requirements, module sources or version constraints, and backend +configurations. + +You can reinitialize a directory by running `terraform init` again. In fact, you +can reinitialize at any time; the init command is idempotent, and will have no +effect if no changes are required. + +If reinitialization is required, any commands that rely on initialization will +fail with an error and tell you so. + +## Reinitializing Only Modules + +The `terraform get` command will download modules referenced in the +configuration, but will not perform the other required initialization tasks. +This command is only useful for niche workflows, and most Terraform users can +ignore it in favor of `terraform init`. diff --git a/website/docs/cli/inspect/index.html.md b/website/docs/cli/inspect/index.html.md new file mode 100644 index 000000000..f704c0398 --- /dev/null +++ b/website/docs/cli/inspect/index.html.md @@ -0,0 +1,33 @@ +--- +layout: "docs" +page_title: "Inspecting Infrastructure - Terraform CLI" +--- + +# Inspecting Infrastructure + +Terraform configurations and state data include some highly structured +information about the resources they manage; this includes dependency +information, outputs (which are pieces of generated or discovered data that the +configuration's author considers important enough to surface to users), and +more. + +Terraform CLI includes some commands for inspecting or transforming this data. +You can use these to integrate other tools with Terraform's infrastructure data, +or just to gain a deeper or more holistic understanding of your infrastructure. + +- [The `terraform graph` command](/docs/cli/commands/graph.html) creates a visual + representation of a configuration or a set of planned changes. 
+- [The `terraform output` command](/docs/cli/commands/output.html) can get the + values for the top-level [output values](/docs/language/values/outputs.html) of + a configuration, which are often helpful when making use of the infrastructure + Terraform has provisioned. +- [The `terraform show` command](/docs/cli/commands/show.html) can generate + human-readable versions of a state file or plan file, or generate + machine-readable versions that can be integrated with other tools. +- [The `terraform state list` command](/docs/cli/commands/state/list.html) can list + the resources being managed by the current working directory and workspace, + providing a complete or filtered list. +- [The `terraform state show` command](/docs/cli/commands/state/show.html) can print + all of the attributes of a given resource being managed by the current working + directory and workspace, including generated read-only attributes like the + unique ID assigned by the cloud provider. diff --git a/website/docs/cli/install/apt.html.md b/website/docs/cli/install/apt.html.md new file mode 100644 index 000000000..d55effb48 --- /dev/null +++ b/website/docs/cli/install/apt.html.md @@ -0,0 +1,134 @@ +--- +layout: "downloads" +page_title: "APT Packages for Debian and Ubuntu" +sidebar_current: "docs-cli-install-apt" +description: |- + The HashiCorp APT repositories contain distribution-specific Terraform packages for both Debian and Ubuntu systems. +--- + +# APT Packages for Debian and Ubuntu + +The primary distribution packages for Terraform are `.zip` archives containing +single executable files that you can extract anywhere on your system. However, +for easier integration with configuration management tools and other systematic +system configuration strategies, we also offer package repositories for +Debian and Ubuntu systems, which allow you to install Terraform using the +`apt install` command or any other APT frontend. 
+
+If you are instead using Red Hat Enterprise Linux, CentOS, or Fedora, you
+might prefer to [install Terraform from our Yum repositories](yum.html).
+
+-> **Note:** The APT repositories discussed on this page are generic HashiCorp
+repositories that contain packages for a variety of different HashiCorp
+products, rather than just Terraform. Adding these repositories to your
+system will, by default, therefore make a number of other non-Terraform
+packages available for installation. That might then mask some packages that
+are available for some HashiCorp products in the main Debian and Ubuntu
+package repositories.
+
+## Repository Configuration
+
+The Terraform packages are signed using a private key controlled by HashiCorp,
+so in most situations the first step would be to configure your system to trust
+that HashiCorp key for package authentication. For example:
+
+```bash
+curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add -
+```
+
+After registering the key, you can add the official HashiCorp repository to
+your system:
+
+```bash
+sudo apt-add-repository "deb [arch=$(dpkg --print-architecture)] https://apt.releases.hashicorp.com $(lsb_release -cs) main"
+```
+
+The above command line uses the following sub-shell commands:
+
+* `dpkg --print-architecture` to determine your system's primary APT
+  architecture/ABI, such as `amd64`.
+* `lsb_release -cs` to find the distribution release codename for your current
+  system, such as `buster`, `groovy`, or `sid`.
+
+`apt-add-repository` usually automatically runs `apt update` as part of its
+work in order to fetch the new package indices, but if it does not then you
+will need to do so manually before the packages will be available.
+
+To install Terraform from the new repository:
+
+```bash
+sudo apt install terraform
+```
+
+## Supported Architectures
+
+The HashiCorp APT server currently has packages only for the `amd64`
+architecture, which is also sometimes known as `x86_64`.
+
+There are no official packages available for other architectures, such as
+`arm64`. If you wish to use Terraform on a non-`amd64` system,
+[download a normal release `.zip` file](/downloads.html) instead.
+
+## Supported Debian and Ubuntu Releases
+
+The HashiCorp APT server currently contains release repositories for the
+following distribution releases:
+
+* Debian 8 (`jessie`)
+* Debian 9 (`stretch`)
+* Debian 10 (`buster`)
+* Ubuntu 16.04 (`xenial`)
+* Ubuntu 18.04 (`bionic`)
+* Ubuntu 19.10 (`eoan`)
+* Ubuntu 20.04 (`focal`)
+* Ubuntu 20.10 (`groovy`)
+
+No repositories are available for other Debian or Ubuntu versions or for
+any other APT-based Linux distributions. If you add the repository using
+the above commands on other systems then `apt update` will report the
+repository index as missing.
+
+Terraform executables are statically linked and so they depend only on the
+Linux system call interface, not on any system libraries. Because of that,
+you may be able to use one of the above release codenames when adding a
+repository to your system, even if that codename doesn't match your current
+distribution release.
+
+Over time we will change the set of supported distributions, including both
+adding support for new releases and ceasing to publish new Terraform versions
+under older releases.
+
+## Choosing Terraform Versions
+
+The HashiCorp APT repositories contain multiple versions of Terraform, but
+because the packages are all named `terraform` it is impossible to install
+more than one version at a time, and `apt install` will default to selecting
+the latest version.
+
+It's often necessary to match your Terraform version with what a particular
+configuration is currently expecting. 
You can use the following command to +see which versions are currently available in the repository index: + +```bash +apt policy terraform +``` + +There may be multiple package releases for a particular Terraform version if +we need to publish an updated package for any reason. In that case, the +subsequent releases will have an additional suffix, like `0.13.4-2`. In these +cases the Terraform executable inside the package should be unchanged, but its +metadata and other contents may be different. + +You can select a specific version to install by including it in the +`apt install` command line, as follows: + +```bash +sudo apt install terraform=0.14.0 +``` + +If your workflow requires using multiple versions of Terraform at the same +time, for example when working through a gradual upgrade where not all +of your configurations are upgraded yet, we recommend that you use the +official release `.zip` files instead of the APT packages, so you can install +multiple versions at once and then select which to use for each command you +run. diff --git a/website/docs/cli/install/yum.html.md b/website/docs/cli/install/yum.html.md new file mode 100644 index 000000000..45e88e0eb --- /dev/null +++ b/website/docs/cli/install/yum.html.md @@ -0,0 +1,121 @@ +--- +layout: "downloads" +page_title: "Yum Packages for Red Hat Enterprise Linux, Fedora, and Amazon Linux" +sidebar_current: "docs-cli-install-yum" +description: |- + The HashiCorp Yum repositories contain distribution-specific Terraform packages for Red Hat Enterprise Linux, Fedora, and Amazon Linux systems. +--- + +# Yum/DNF Packages for RHEL, CentOS, and Fedora + +The primary distribution packages for Terraform are `.zip` archives containing +single executable files that you can extract anywhere on your system. 
However, +for easier integration with configuration management tools and other systematic +system configuration strategies, we also offer package repositories for +RedHat Enterprise Linux, Fedora, and Amazon Linux systems, which allow you to +install Terraform using the `yum install` or `dnf install` commands. + +If you are instead using Debian or Ubuntu, you +might prefer to [install Terraform from our APT repositories](apt.html). + +-> **Note:** The Yum repositories discussed on this page are generic HashiCorp +repositories that contain packages for a variety of different HashiCorp +products, rather than just Terraform. Adding these repositories to your +system will, by default, therefore make a number of other non-Terraform +packages available for installation. That might then mask the packages that are +available for some HashiCorp products in the main distribution repositories. + +## Repository Configuration + +Before adding a repository you must determine which distribution you are using. +The following command lines refer to a placeholder variable `$release` which +you must replace with the appropriate value from the following list: + +* Red Hat Enterprise Linux: `RHEL` +* Fedora: `fedora` +* Amazon Linux: `AmazonLinux` + +If you are using a Yum-based distribution, add the repository using +`yum-config-manager` as follows: + +```bash +sudo yum install -y yum-utils +sudo yum-config-manager --add-repo https://rpm.releases.hashicorp.com/$release/hashicorp.repo +``` + +If you are using a DNF-based distribution, add the repository using +`dnf config-manager` as follows: + +```bash +sudo dnf install -y dnf-plugins-core +sudo dnf config-manager --add-repo https://rpm.releases.hashicorp.com/$release/hashicorp.repo +``` + +In both cases, the Terraform package name is `terraform`. 
For example:
+
+```bash
+yum install terraform
+```
+
+## Supported Architectures
+
+The HashiCorp Yum/DNF server currently has packages only for the `x86_64`
+architecture, which is also sometimes known as `amd64`.
+
+There are no official packages available for other architectures, such as
+`aarch64`. If you wish to use Terraform on a non-`x86_64` system,
+[download a normal release `.zip` file](/downloads.html) instead.
+
+## Supported Distribution Releases
+
+The HashiCorp Yum server currently contains release repositories for the
+following distribution releases:
+
+* AmazonLinux 2
+* Fedora 29
+* Fedora 30
+* Fedora 31
+* Fedora 32
+* Fedora 33
+* RHEL 7 (and CentOS 7)
+* RHEL 8 (and CentOS 8)
+
+No repositories are available for other versions of these distributions or for
+any other RPM-based Linux distributions. If you add the repository using
+the above commands on other systems then you will see a 404 Not Found error.
+
+Over time we will change the set of supported distributions, including both
+adding support for new releases and ceasing to publish new Terraform versions
+under older releases.
+
+## Choosing Terraform Versions
+
+The HashiCorp Yum repositories contain multiple versions of Terraform, but
+because the packages are all named `terraform` it is impossible to install
+more than one version at a time, and `yum install` or `dnf install` will
+default to selecting the latest version.
+
+It's often necessary to match your Terraform version with what a particular
+configuration is currently expecting. You can use the following command to
+see which versions are currently available in the repository index:
+
+```bash
+yum --showduplicates list terraform
+```
+
+You can select a specific version to install by including it in the
+`yum install` command line, as follows:
+
+```bash
+yum install terraform-0.14.0-2.x86_64
+```
+
+If you are using a DNF-based distribution, similarly use `dnf` instead of `yum`
+when following the above steps.
+ +If your workflow requires using multiple versions of Terraform at the same +time, for example when working through a gradual upgrade where not all +of your configurations are upgraded yet, we recommend that you use the +official release `.zip` files instead of the Yum packages, so you can install +multiple versions at once and then select which to use for each command you +run. diff --git a/website/docs/cli/plugins/index.html.md b/website/docs/cli/plugins/index.html.md new file mode 100644 index 000000000..c1ac5e1e6 --- /dev/null +++ b/website/docs/cli/plugins/index.html.md @@ -0,0 +1,58 @@ +--- +layout: "docs" +page_title: "Managing Plugins - Terraform CLI" +--- + +# Managing Plugins + +Terraform relies on plugins called "providers" in order to manage various types +of resources. (For more information about providers, see +[Providers](/docs/language/providers/index.html) in the Terraform +language docs.) + +-> **Note:** Providers are currently the only plugin type most Terraform users +will interact with. Terraform also supports third-party provisioner plugins, but +we discourage their use. + +Terraform downloads and/or installs any providers +[required](/docs/language/providers/requirements.html) by a configuration +when [initializing](/docs/cli/init/index.html) a working directory. By default, +this works without any additional interaction but requires network access to +download providers from their source registry. + +You can configure Terraform's provider installation behavior to limit or skip +network access, and to enable use of providers that aren't available via a +networked source. Terraform also includes some commands to show information +about providers and to reduce the effort of installing providers in airgapped +environments. + +## Configuring Plugin Installation + +Terraform's configuration file includes options for caching downloaded plugins, +or explicitly specifying a local or HTTPS mirror to install plugins from. 
For +more information, see [CLI Config File](/docs/cli/config/config-file.html). + +## Getting Plugin Information + +Use the [`terraform providers`](/docs/cli/commands/providers.html) command to get information +about the providers required by the current working directory's configuration. + +Use the [`terraform version`](/docs/cli/commands/version.html) command (or +`terraform -version`) to show the specific provider versions installed for the +current working directory. + +Use the [`terraform providers schema`](/docs/cli/commands/providers/schema.html) command to +get machine-readable information about the resources and configuration options +offered by each provider. + +## Managing Plugin Installation + +Use the [`terraform providers mirror`](/docs/cli/commands/providers/mirror.html) command to +download local copies of every provider required by the current working +directory's configuration. This directory will use the nested directory layout +that Terraform expects when installing plugins from a local source, so you can +transfer it directly to an airgapped system that runs Terraform. + +Use the [`terraform providers lock`](/docs/cli/commands/providers/lock.html) command +to update the lock file that Terraform uses to ensure predictable runs when +using ambiguous provider version constraints. diff --git a/website/docs/plugins/signing.html.md b/website/docs/cli/plugins/signing.html.md similarity index 92% rename from website/docs/plugins/signing.html.md rename to website/docs/cli/plugins/signing.html.md index f1cfa7c68..95c8e935f 100644 --- a/website/docs/plugins/signing.html.md +++ b/website/docs/cli/plugins/signing.html.md @@ -1,11 +1,12 @@ --- -layout: "registry" +layout: "docs" page_title: "Plugin Signing" -sidebar_current: "docs-plugins-signing" description: |- Terraform plugin signing trust levels --- + + # Plugin Signing ~> **Note** Currently only provider plugins fetched from a registry are authenticated. 
@@ -13,7 +14,7 @@ description: |- Terraform providers installed from the Registry are cryptographically signed, and the signature is verified at time of installation. There are three types of provider signatures, each with different trust implications: * **Signed by HashiCorp** - are built, signed, and supported by HashiCorp. -* **Signed by Trusted Partners** - are built, signed, and supported by a third party. HashiCorp has +* **Signed by Trusted Partners** - are built, signed, and supported by a third party. HashiCorp has verified the ownership of the private key and we provide a chain of trust to the CLI to verify this programatically. * **Self-signed** - are built, signed, and supported by a third party. HashiCorp does not provide a diff --git a/website/docs/cli/run/index.html.md b/website/docs/cli/run/index.html.md new file mode 100644 index 000000000..579c9d81b --- /dev/null +++ b/website/docs/cli/run/index.html.md @@ -0,0 +1,71 @@ +--- +layout: "docs" +page_title: "Provisioning Infrastructure - Terraform CLI" +--- + +# Provisioning Infrastructure with Terraform + +Terraform's primary function is to create, modify, and destroy infrastructure +resources to match the desired state described in a +[Terraform configuration](/docs/language/index.html). + +When people refer to "running Terraform," they generally mean performing these +provisioning actions in order to affect real infrastructure objects. The +Terraform binary has many other subcommands for a wide variety of administrative +actions, but these basic provisioning tasks are the core of Terraform. + +Terraform's provisioning workflow relies on three commands: `plan`, `apply`, and +`destroy`. All of these commands require an +[initialized](/docs/cli/init/index.html) working directory, and all of them act +only upon the currently selected [workspace](/docs/cli/workspaces/index.html). 
+ +## Planning + +The `terraform plan` command evaluates a Terraform configuration to determine +the desired state of all the resources it declares, then compares that desired +state to the real infrastructure objects being managed with the current working +directory and workspace. It uses state data to determine which real objects +correspond to which declared resources, and checks the current state of each +resource using the relevant infrastructure provider's API. + +Once it has determined the difference between the current state and the desired +state, `terraform plan` presents a description of the changes necessary to +achieve the desired state. It _does not_ perform any actual changes to real +world infrastructure objects; it only presents a plan for making changes. + +Plans are usually run to validate configuration changes and confirm that the +resulting actions are as expected. However, `terraform plan` can also save its +plan as a runnable artifact, which `terraform apply` can use to carry out those +exact changes. + +For details, see [the `terraform plan` command](/docs/cli/commands/plan.html). + +## Applying + +The `terraform apply` command performs a plan just like `terraform plan` does, +but then actually carries out the planned changes to each resource using the +relevant infrastructure provider's API. It asks for confirmation from the user +before making any changes, unless it was explicitly told to skip approval. + +By default, `terraform apply` performs a fresh plan right before applying +changes, and displays the plan to the user when asking for confirmation. +However, it can also accept a plan file produced by `terraform plan` in lieu of +running a new plan. You can use this to reliably perform an exact set of +pre-approved changes, even if the configuration or the state of the real +infrastructure has changed in the minutes since the original plan was created. + +For details, see [the `terraform apply` command](/docs/cli/commands/apply.html). 
+ +## Destroying + +The `terraform destroy` command destroys all of the resources being managed by +the current working directory and workspace, using state data to determine which +real world objects correspond to managed resources. Like `terraform apply`, it +asks for confirmation before proceeding. + +A destroy behaves exactly like deleting every resource from the configuration +and then running an apply, except that it doesn't require editing the +configuration. This is more convenient if you intend to provision similar +resources at a later date. + +For details, see [the `terraform destroy` command](/docs/cli/commands/destroy.html). diff --git a/website/docs/cli/state/index.html.md b/website/docs/cli/state/index.html.md new file mode 100644 index 000000000..8448de795 --- /dev/null +++ b/website/docs/cli/state/index.html.md @@ -0,0 +1,30 @@ +--- +layout: "docs" +page_title: "Manipulating State - Terraform CLI" +--- + +# Manipulating Terraform State + +Terraform uses [state data](/docs/language/state/index.html) to remember which +real-world object corresponds to each resource in the configuration; +this allows it to modify an existing object when its resource declaration +changes. + +Terraform updates state automatically during plans and applies. However, it's +sometimes necessary to make deliberate adjustments to Terraform's state data, +usually to compensate for changes to the configuration or the real managed +infrastructure. 
+ +Terraform CLI supports several workflows for interacting with state: + +- [Inspecting State](/docs/cli/state/inspect.html) +- [Forcing Re-creation (Tainting)](/docs/cli/state/taint.html) +- [Moving Resources](/docs/cli/state/move.html) +- Importing Pre-existing Resources (documented in the + [Importing Infrastructure](/docs/cli/import/index.html) section) +- [Disaster Recovery](/docs/cli/state/recover.html) + +~> **Important:** Modifying state data outside a normal plan or apply can cause +Terraform to lose track of managed resources, which might waste money, annoy +your colleagues, or even compromise the security of your operations. Make sure +to keep backups of your state data when modifying state out-of-band. diff --git a/website/docs/cli/state/inspect.html.md b/website/docs/cli/state/inspect.html.md new file mode 100644 index 000000000..53ccc6f95 --- /dev/null +++ b/website/docs/cli/state/inspect.html.md @@ -0,0 +1,21 @@ +--- +layout: "docs" +page_title: "Inspecting State - Terraform CLI" +--- + +# Inspecting State + +Terraform includes some commands for reading and updating state without taking +any other actions. + +- [The `terraform state list` command](/docs/cli/commands/state/list.html) + shows the resource addresses for every resource Terraform knows about in a + configuration, optionally filtered by partial resource address. + +- [The `terraform state show` command](/docs/cli/commands/state/show.html) + displays detailed state data about one resource. + +- [The `terraform refresh` command](/docs/cli/commands/refresh.html) updates + state data to match the real-world condition of the managed resources. This is + done automatically during plans and applies, but not when interacting with + state directly. 
diff --git a/website/docs/cli/state/move.html.md b/website/docs/cli/state/move.html.md new file mode 100644 index 000000000..87bb7198d --- /dev/null +++ b/website/docs/cli/state/move.html.md @@ -0,0 +1,35 @@ +--- +layout: "docs" +page_title: "Moving Resources - Terraform CLI" +--- + +# Moving Resources + +Terraform's state associates each real-world object with a configured resource +at a specific [resource address](/docs/cli/state/resource-addressing.html). This +is seamless when changing a resource's attributes, but Terraform will lose track +of a resource if you change its name, move it to a different module, or change +its provider. + +Usually that's fine: Terraform will destroy the old resource, replace it with a +new one (using the new resource address), and update any resources that rely on +its attributes. + +In cases where it's important to preserve an existing infrastructure object, you +can explicitly tell Terraform to associate it with a different configured +resource. + +- [The `terraform state mv` command](/docs/cli/commands/state/mv.html) changes + which resource address in your configuration is associated with a particular + real-world object. Use this to preserve an object when renaming a resource, or + when moving a resource into or out of a child module. + +- [The `terraform state rm` command](/docs/cli/commands/state/rm.html) tells + Terraform to stop managing a resource as part of the current working directory + and workspace, _without_ destroying the corresponding real-world object. (You + can later use `terraform import` to start managing that resource in a + different workspace or a different Terraform configuration.) + +- [The `terraform state replace-provider` command](/docs/cli/commands/state/replace-provider.html) + transfers existing resources to a new provider without requiring them to be + re-created. 
diff --git a/website/docs/cli/state/recover.html.md b/website/docs/cli/state/recover.html.md new file mode 100644 index 000000000..5b45b8f6f --- /dev/null +++ b/website/docs/cli/state/recover.html.md @@ -0,0 +1,24 @@ +--- +layout: "docs" +page_title: "Recovering from State Disasters - Terraform CLI" +--- + +# Recovering from State Disasters + +If something has gone horribly wrong (possibly due to accidents when performing +other state manipulation actions), you might need to take drastic actions with +your state data. + +- [The `terraform force-unlock` command](/docs/cli/commands/force-unlock.html) can + override the protections Terraform uses to prevent two processes from + modifying state at the same time. You might need this if a Terraform process + (like a normal apply) is unexpectedly terminated (like by the complete + destruction of the VM it's running in) before it can release its lock on the + state backend. Do not run this until you are completely certain what happened + to the process that caused the lock to get stuck. + +- [The `terraform state pull` command](/docs/cli/commands/state/pull.html) and + [the `terraform state push` command](/docs/cli/commands/state/push.html) can + directly read and write entire state files from and to the configured backend. + You might need this for obtaining or restoring a state backup. 
+ diff --git a/website/docs/internals/resource-addressing.html.markdown b/website/docs/cli/state/resource-addressing.html.md similarity index 100% rename from website/docs/internals/resource-addressing.html.markdown rename to website/docs/cli/state/resource-addressing.html.md diff --git a/website/docs/cli/state/taint.html.md b/website/docs/cli/state/taint.html.md new file mode 100644 index 000000000..12162c36d --- /dev/null +++ b/website/docs/cli/state/taint.html.md @@ -0,0 +1,25 @@ +--- +layout: "docs" +page_title: "Forcing Re-creation of Resources (Tainting) - Terraform CLI" +--- + +# Forcing Re-creation of Resources (Tainting) + +When a resource declaration is modified, Terraform usually attempts to update +the existing resource in place (although some changes can require destruction +and re-creation, usually due to upstream API limitations). + +In some cases, you might want a resource to be destroyed and re-created even +when Terraform doesn't think it's necessary. This is usually for objects that +aren't fully described by their resource arguments due to side-effects that +happen during creation; for example, a virtual machine that configures itself +with `cloud-init` on startup might no longer meet your needs if the cloud-init +configuration changes. + +- [The `terraform taint` command](/docs/cli/commands/taint.html) tells Terraform to + destroy and re-create a particular resource during the next apply, regardless + of whether its resource arguments would normally require that. + +- [The `terraform untaint` command](/docs/cli/commands/untaint.html) undoes a + previous taint, or can preserve a resource that was automatically tainted due + to failed [provisioners](/docs/language/resources/provisioners/syntax.html). 
diff --git a/website/docs/cli/workspaces/index.html.md b/website/docs/cli/workspaces/index.html.md new file mode 100644 index 000000000..f6a76a0e6 --- /dev/null +++ b/website/docs/cli/workspaces/index.html.md @@ -0,0 +1,78 @@ +--- +layout: "docs" +page_title: "Managing Workspaces - Terraform CLI" +--- + +# Managing Workspaces + +In Terraform CLI, _workspaces_ are separate instances of +[state data](/docs/language/state/index.html) that can be used from the same working +directory. You can use workspaces to manage multiple non-overlapping groups of +resources with the same configuration. + +- Every [initialized working directory](/docs/cli/init/index.html) has at least + one workspace. (If you haven't created other workspaces, it is a workspace + named `default`.) +- For a given working directory, only one workspace can be _selected_ at a time. +- Most Terraform commands (including [provisioning](/docs/cli/run/index.html) + and [state manipulation](/docs/cli/state/index.html) commands) only interact + with the currently selected workspace. +- Use [the `terraform workspace select` command](/docs/cli/commands/workspace/select.html) + to change the currently selected workspace. +- Use the [`terraform workspace list`](/docs/cli/commands/workspace/list.html), + [`terraform workspace new`](/docs/cli/commands/workspace/new.html), and + [`terraform workspace delete`](/docs/cli/commands/workspace/delete.html) commands + to manage the available workspaces in the current working directory. + +-> **Note:** Terraform Cloud and Terraform CLI both have features called +"workspaces," but they're slightly different. Terraform Cloud's workspaces +behave more like completely separate working directories. + +## The Purpose of Workspaces + +Since most of the resources you can manage with Terraform don't include a unique +name as part of their configuration, it's common to use the same Terraform +configuration to provision multiple groups of similar resources. 
+ +Terraform relies on [state](/docs/language/state/index.html) to associate resources with +real-world objects, so if you run the same configuration multiple times with +completely separate state data, Terraform can manage many non-overlapping groups +of resources. In some cases you'll want to change +[variable values](/docs/language/values/variables.html) for these different +resource collections (like when specifying differences between staging and +production deployments), and in other cases you might just want many instances +of a particular infrastructure pattern. + +The simplest way to maintain multiple instances of a configuration with +completely separate state data is to use multiple +[working directories](/docs/cli/init/index.html) (with different +[backend](/docs/language/settings/backends/configuration.html) configurations per directory, if you +aren't using the default `local` backend). + +However, this isn't always the most _convenient_ way to handle separate states. +Terraform installs a separate cache of plugins and modules for each working +directory, so maintaining multiple directories can waste bandwidth and disk +space. You must also update your configuration code from version control +separately for each directory, reinitialize each directory separately when +changing the configuration, etc. + +Workspaces allow you to use the same working copy of your configuration and the +same plugin and module caches, while still keeping separate states for each +collection of resources you manage. + +## Interactions with Terraform Cloud Workspaces + +Terraform Cloud organizes infrastructure using workspaces, but its workspaces +act more like completely separate working directories; each Terraform Cloud +workspace has its own Terraform configuration, set of variable values, state +data, run history, and settings. + +These two kinds of workspaces are different, but related. 
When using Terraform +CLI as a frontend for Terraform Cloud, you associate the current working +directory with one or more remote workspaces by configuring +[the `remote` backend](/docs/language/settings/backends/remote.html). If you associate the +directory with multiple workspaces (using a name prefix), you can use the +`terraform workspace` commands to select which remote workspace to use. + +For more information about using Terraform CLI with Terraform Cloud, see +[CLI-driven Runs](/docs/cloud/run/cli.html) in the Terraform Cloud docs. diff --git a/website/docs/commands/output.html.markdown b/website/docs/commands/output.html.markdown deleted file mode 100644 index 5b072eb16..000000000 --- a/website/docs/commands/output.html.markdown +++ /dev/null @@ -1,86 +0,0 @@ ---- -layout: "docs" -page_title: "Command: output" -sidebar_current: "docs-commands-output" -description: |- - The `terraform output` command is used to extract the value of an output variable from the state file. ---- - -# Command: output - -The `terraform output` command is used to extract the value of -an output variable from the state file. - -## Usage - -Usage: `terraform output [options] [NAME]` - -With no additional arguments, `output` will display all the outputs for -the root module. If an output `NAME` is specified, only the value of that -output is printed. - -The command-line flags are all optional. The list of available flags are: - -* `-json` - If specified, the outputs are formatted as a JSON object, with - a key per output. If `NAME` is specified, only the output specified will be - returned. This can be piped into tools such as `jq` for further processing. -* `-no-color` - If specified, output won't contain any color. -* `-state=path` - Path to the state file. Defaults to "terraform.tfstate". - Ignored when [remote state](/docs/state/remote.html) is used. - -## Examples - -These examples assume the following Terraform output snippet. 
- -```hcl -output "lb_address" { - value = "${aws_alb.web.public_dns}" -} - -output "instance_ips" { - value = ["${aws_instance.web.*.public_ip}"] -} - -output "password" { - sensitive = true - value = ["${var.secret_password}"] -} -``` - -To list all outputs: - -```shell -$ terraform output -``` - -Note that outputs with the `sensitive` attribute will be redacted: -```shell -$ terraform output password -password = -``` - -To query for the DNS address of the load balancer: - -```shell -$ terraform output lb_address -my-app-alb-1657023003.us-east-1.elb.amazonaws.com -``` - -To query for all instance IP addresses: - -```shell -$ terraform output instance_ips -test = [ - 54.43.114.12, - 52.122.13.4, - 52.4.116.53 -] -``` - -To query for a particular value in a list, use `-json` and a JSON -command-line parser such as [jq](https://stedolan.github.io/jq/). -For example, to query for the first instance's IP address: - -```shell -$ terraform output -json instance_ips | jq '.value[0]' -``` diff --git a/website/docs/commands/state/addressing.html.md b/website/docs/commands/state/addressing.html.md deleted file mode 100644 index 0df951188..000000000 --- a/website/docs/commands/state/addressing.html.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -layout: "commands-state" -page_title: "Command: state resource addressing" -sidebar_current: "docs-commands-state-address" -description: |- - The `terraform state` command is used for advanced state management. ---- - -# Resource Addressing - -The `terraform state` subcommands use -[standard address syntax](/docs/internals/resource-addressing.html) to refer -to individual resources, resource instances, and modules. This is the same -syntax used for the `-target` option to the `apply` and `plan` commands. - -Most state commands allow referring to individual resource instances, whole -resources (which may have multiple instances if `count` or `for_each` is used), -or even whole modules. 
- -For more information on the syntax, see [Resource Addressing](/docs/internals/resource-addressing.html). diff --git a/website/docs/configuration-0-11/data-sources.html.md b/website/docs/configuration-0-11/data-sources.html.md index 7f5c7e680..90f3463e3 100644 --- a/website/docs/configuration-0-11/data-sources.html.md +++ b/website/docs/configuration-0-11/data-sources.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Data Sources - 0.11 Configuration Language" sidebar_current: "docs-conf-old-data-sources" description: |- @@ -10,7 +10,7 @@ description: |- -> **Note:** This page is about Terraform 0.11 and earlier. For Terraform 0.12 and later, see -[Configuration Language: Configuring Data Sources](../configuration/data-sources.html). +[Configuration Language: Configuring Data Sources](/docs/language/data-sources/index.html). *Data sources* allow data to be fetched or computed for use elsewhere in Terraform configuration. Use of data sources allows a Terraform diff --git a/website/docs/configuration-0-11/environment-variables.html.md b/website/docs/configuration-0-11/environment-variables.html.md index 083d95048..979980884 100644 --- a/website/docs/configuration-0-11/environment-variables.html.md +++ b/website/docs/configuration-0-11/environment-variables.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Environment Variables - 0.11 Configuration Language" sidebar_current: "docs-conf-old-environment-variables" description: |- @@ -10,7 +10,7 @@ description: |- -> **Note:** This page is about Terraform 0.11 and earlier. For Terraform 0.12 and later, see -[Commands: Environment Variables](../commands/environment-variables.html). +[Commands: Environment Variables](/docs/cli/config/environment-variables.html). ## TF_LOG @@ -48,7 +48,7 @@ export TF_INPUT=0 ## TF_MODULE_DEPTH -When given a value, causes terraform commands to behave as if the `-module-depth=VALUE` flag was specified. 
By setting this to 0, for example, you enable commands such as [plan](/docs/commands/plan.html) and [graph](/docs/commands/graph.html) to display more compressed information. +When given a value, causes terraform commands to behave as if the `-module-depth=VALUE` flag was specified. By setting this to 0, for example, you enable commands such as [plan](/docs/cli/commands/plan.html) and [graph](/docs/cli/commands/graph.html) to display more compressed information. ```shell export TF_MODULE_DEPTH=0 diff --git a/website/docs/configuration-0-11/index.html.md b/website/docs/configuration-0-11/index.html.md index eb4167aa9..35982822a 100644 --- a/website/docs/configuration-0-11/index.html.md +++ b/website/docs/configuration-0-11/index.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "0.11 Configuration Language" sidebar_current: "docs-conf-old" description: |- @@ -10,7 +10,7 @@ description: |- -> **Note:** This page is about Terraform 0.11 and earlier. For Terraform 0.12 and later, see -[Configuration Language](../configuration/index.html). +[Configuration Language](/docs/language/index.html). Terraform uses text files to describe infrastructure and to set variables. These text files are called Terraform _configurations_ and end in diff --git a/website/docs/configuration-0-11/interpolation.html.md b/website/docs/configuration-0-11/interpolation.html.md index 738783625..9ec65d8aa 100644 --- a/website/docs/configuration-0-11/interpolation.html.md +++ b/website/docs/configuration-0-11/interpolation.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Interpolation Syntax - 0.11 Configuration Language" sidebar_current: "docs-conf-old-interpolation" description: |- @@ -10,8 +10,8 @@ description: |- -> **Note:** This page is about Terraform 0.11 and earlier. 
For Terraform 0.12 and later, see -[Configuration Language: Expressions](../configuration/expressions.html) and -[Configuration Language: Functions](../configuration/functions.html). +[Configuration Language: Expressions](/docs/language/expressions/index.html) and +[Configuration Language: Functions](/docs/language/functions/index.html). Embedded within strings in Terraform, whether you're using the Terraform syntax or JSON syntax, you can interpolate other values. These @@ -77,7 +77,7 @@ syntax to get a list of all the attributes: `${data.aws_subnet.example.*.cidr_bl The syntax is `module..`. For example `${module.foo.bar}` will interpolate the `bar` output from the `foo` -[module](/docs/modules/index.html). +[module](/docs/language/modules/develop/index.html). #### Count information @@ -98,7 +98,7 @@ path of the root module. In general, you probably want the The syntax is `terraform.`. This variable type contains metadata about the currently executing Terraform run. FIELD can currently only be `env` to -reference the currently active [state environment](/docs/state/environments.html). +reference the currently active workspace. ## Conditionals @@ -409,7 +409,7 @@ The supported built-in functions are: * `timestamp()` - Returns a UTC timestamp string in RFC 3339 format. This string will change with every invocation of the function, so in order to prevent diffs on every plan & apply, it must be used with the - [`ignore_changes`](./resources.html#ignore-changes) lifecycle attribute. + [`ignore_changes`](./resources.html#ignore_changes) lifecycle attribute. * `timeadd(time, duration)` - Returns a UTC timestamp string corresponding to adding a given `duration` to `time` in RFC 3339 format. For example, `timeadd("2017-11-22T00:00:00Z", "10m")` produces a value `"2017-11-22T00:10:00Z"`. @@ -424,7 +424,7 @@ The supported built-in functions are: * `urlencode(string)` - Returns an URL-safe copy of the string. - * `uuid()` - Returns a random UUID string. 
This string will change with every invocation of the function, so in order to prevent diffs on every plan & apply, it must be used with the [`ignore_changes`](./resources.html#ignore-changes) lifecycle attribute. + * `uuid()` - Returns a random UUID string. This string will change with every invocation of the function, so in order to prevent diffs on every plan & apply, it must be used with the [`ignore_changes`](./resources.html#ignore_changes) lifecycle attribute. * `values(map)` - Returns a list of the map values, in the order of the keys returned by the `keys` function. This function only works on flat maps and @@ -449,7 +449,7 @@ Terraform 0.12 and later. ## Templates Long strings can be managed using templates. -[Templates](/docs/providers/template/index.html) are +[Templates](https://registry.terraform.io/providers/hashicorp/template/latest/docs) are [data-sources](./data-sources.html) defined by a string with interpolation tokens (usually loaded from a file) and some variables to use during interpolation. They have a computed `rendered` attribute @@ -487,7 +487,7 @@ by the surrounding scope of the configuration. You may use any of the built-in functions in your template. For more details on template usage, please see the -[template_file documentation](/docs/providers/template/d/file.html). +[template_file documentation](https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/file). ### Using Templates with Count @@ -560,7 +560,7 @@ Operator precedences is the standard mathematical order of operations: "${2 * (4 + 3) * 3}" # computes to 42 ``` -You can use the [terraform console](/docs/commands/console.html) command to +You can use the [terraform console](/docs/cli/commands/console.html) command to try the math operations. 
-> **Note:** Since Terraform allows hyphens in resource and variable names, diff --git a/website/docs/configuration-0-11/load.html.md b/website/docs/configuration-0-11/load.html.md index 75ddcb191..b95adffab 100644 --- a/website/docs/configuration-0-11/load.html.md +++ b/website/docs/configuration-0-11/load.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Load Order and Semantics - 0.11 Configuration Language" sidebar_current: "docs-conf-old-load" description: |- @@ -10,7 +10,7 @@ description: |- -> **Note:** This page is about Terraform 0.11 and earlier. For Terraform 0.12 and later, see -[Configuration Language](../configuration/index.html). +[Configuration Language](/docs/language/index.html). When invoking any command that loads the Terraform configuration, Terraform loads all configuration files within the directory diff --git a/website/docs/configuration-0-11/locals.html.md b/website/docs/configuration-0-11/locals.html.md index 74dc658a3..b23a5b3a1 100644 --- a/website/docs/configuration-0-11/locals.html.md +++ b/website/docs/configuration-0-11/locals.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Local Values - 0.11 Configuration Language" sidebar_current: "docs-conf-old-locals" description: |- @@ -11,7 +11,7 @@ description: |- -> **Note:** This page is about Terraform 0.11 and earlier. For Terraform 0.12 and later, see -[Configuration Language: Configuring Local Values](../configuration/locals.html). +[Configuration Language: Configuring Local Values](/docs/language/values/locals.html). Local values assign a name to an expression, that can then be used multiple times within a module. 
diff --git a/website/docs/configuration-0-11/modules.html.md b/website/docs/configuration-0-11/modules.html.md index 6ac2a0b33..25f2984d7 100644 --- a/website/docs/configuration-0-11/modules.html.md +++ b/website/docs/configuration-0-11/modules.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Modules - 0.11 Configuration Language" sidebar_current: "docs-conf-old-modules" description: |- @@ -10,7 +10,7 @@ description: |- -> **Note:** This page is about Terraform 0.11 and earlier. For Terraform 0.12 and later, see -[Configuration Language: Modules](../configuration/modules.html). +[Configuration Language: Modules](/docs/language/modules/index.html). A _module_ is a container for multiple resources that are used together. @@ -27,7 +27,7 @@ and re-used. This page describes how to call one module from another. Other pages in this section of the documentation describe the different elements that make up modules, and there is further information about how modules can be used, -created, and published in [the dedicated _Modules_ section](/docs/modules/index.html). +created, and published in [the dedicated _Modules_ section](/docs/language/modules/develop/index.html). ## Calling a Child Module @@ -62,7 +62,7 @@ Terraform CLI. Its value is either the path to a local directory of the module's configuration files, or a remote module source that Terraform should download and use. This value must be a literal string with no template sequences; interpolations are not allowed. For more information on -possible values for this argument, see [Module Sources](/docs/modules/sources.html). +possible values for this argument, see [Module Sources](/docs/language/modules/sources.html). The same source address can be specified in multiple `module` blocks to create multiple copies of the resources defined within, possibly with different @@ -161,7 +161,7 @@ future features. 
Since modules are a complex feature in their own right, further detail about how modules can be used, created, and published is included in -[the dedicated section on modules](/docs/modules/index.html). +[the dedicated section on modules](/docs/language/modules/develop/index.html). ## Providers within Modules @@ -233,7 +233,7 @@ resource "aws_s3_bucket" "example" { This approach is recommended in the common case where only a single configuration is needed for each provider across the entire configuration. -In more complex situations there may be [multiple provider instances](/docs/configuration/providers.html#multiple-provider-instances), +In more complex situations there may be [multiple provider instances](./providers.html#multiple-provider-instances), or a child module may need to use different provider settings than its parent. For such situations, it's necessary to pass providers explicitly as we will see in the next section. @@ -272,7 +272,7 @@ module "example" { The `providers` argument within a `module` block is similar to the `provider` argument within a resource as described for -[multiple provider instances](/docs/configuration/providers.html#multiple-provider-instances), +[multiple provider instances](./providers.html#multiple-provider-instances), but is a map rather than a single string because a module may contain resources from many different providers. @@ -386,7 +386,7 @@ giving each instance a unique name -- here `module "assets_bucket"` and Resources from child modules are prefixed with `module.` when displayed in plan output and elsewhere in the UI. 
For example, the `./publish_bucket` module contains `aws_s3_bucket.example`, and so the two -instances of this module produce S3 bucket resources with [_resource addresses_](/docs/internals/resource-addressing.html) +instances of this module produce S3 bucket resources with [_resource addresses_](/docs/cli/state/resource-addressing.html) `module.assets_bucket.aws_s3_bucket.example` and `module.media_bucket.aws_s3_bucket.example` respectively. These full addresses are used within the UI and on the command line, but are not valid within interpolation expressions due to the @@ -405,7 +405,7 @@ several regions or datacenters. ## Tainting resources within a module -The [taint command](/docs/commands/taint.html) can be used to _taint_ specific +The [taint command](/docs/cli/commands/taint.html) can be used to _taint_ specific resources within a module: ```shell diff --git a/website/docs/configuration-0-11/outputs.html.md b/website/docs/configuration-0-11/outputs.html.md index 7e43180ca..cf9e8b434 100644 --- a/website/docs/configuration-0-11/outputs.html.md +++ b/website/docs/configuration-0-11/outputs.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Output Values - 0.11 Configuration Language" sidebar_current: "docs-conf-old-outputs" description: |- @@ -10,11 +10,11 @@ description: |- -> **Note:** This page is about Terraform 0.11 and earlier. For Terraform 0.12 and later, see -[Configuration Language: Output Values](../configuration/outputs.html). +[Configuration Language: Output Values](/docs/language/values/outputs.html). Outputs define values that will be highlighted to the user when Terraform applies, and can be queried easily using the -[output command](/docs/commands/output.html). +[output command](/docs/cli/commands/output.html). Terraform knows a lot about the infrastructure it manages. 
Most resources have attributes associated with them, and diff --git a/website/docs/configuration-0-11/override.html.md b/website/docs/configuration-0-11/override.html.md index 6311443ff..83411f592 100644 --- a/website/docs/configuration-0-11/override.html.md +++ b/website/docs/configuration-0-11/override.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Override Files - 0.11 Configuration Language" sidebar_current: "docs-conf-old-override" description: |- @@ -10,7 +10,7 @@ description: |- -> **Note:** This page is about Terraform 0.11 and earlier. For Terraform 0.12 and later, see -[Configuration Language: Override Files](../configuration/override.html). +[Configuration Language: Override Files](/docs/language/files/override.html). Terraform loads all configuration files within a directory and appends them together. Terraform also has a concept of _overrides_, diff --git a/website/docs/configuration-0-11/providers.html.md b/website/docs/configuration-0-11/providers.html.md index d74aba634..2450f38e6 100644 --- a/website/docs/configuration-0-11/providers.html.md +++ b/website/docs/configuration-0-11/providers.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Providers - 0.11 Configuration Language" sidebar_current: "docs-conf-old-providers" description: |- @@ -10,7 +10,7 @@ description: |- -> **Note:** This page is about Terraform 0.11 and earlier. For Terraform 0.12 and later, see -[Configuration Language: Providers](../configuration/providers.html). +[Configuration Language: Providers](/docs/language/providers/index.html). Providers are responsible in Terraform for managing the lifecycle of a [resource](./resources.html): create, @@ -79,7 +79,7 @@ distributed by HashiCorp. See [Third-party Plugins](#third-party-plugins) below for installation instructions. For more information, see -[the `terraform init` command](/docs/commands/init.html). +[the `terraform init` command](/docs/cli/commands/init.html). 
## Provider Versions @@ -118,7 +118,7 @@ provider "aws" { ``` This special argument applies to _all_ providers. -[`terraform providers`](/docs/commands/providers.html) can be used to +[`terraform providers`](/docs/cli/commands/providers.html) can be used to view the specified version constraints for all providers used in the current configuration. @@ -220,7 +220,7 @@ may also be used, but currently may cause errors when running `terraform destroy ## Third-party Plugins Anyone can develop and distribute their own Terraform providers. (See -[Writing Custom Providers](/docs/extend/writing-custom-providers.html) for more +[Writing Custom Providers](https://learn.hashicorp.com/collections/terraform/providers?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) for more about provider development.) These third-party providers must be manually installed, since `terraform init` cannot automatically download them. @@ -295,7 +295,7 @@ use of a local directory as a shared plugin cache, which then allows each distinct plugin binary to be downloaded only once. To enable the plugin cache, use the `plugin_cache_dir` setting in -[the CLI configuration file](https://www.terraform.io/docs/commands/cli-config.html). +[the CLI configuration file](https://www.terraform.io/docs/cli/config/config-file.html). For example: ```hcl diff --git a/website/docs/configuration-0-11/resources.html.md b/website/docs/configuration-0-11/resources.html.md index d7bbed97c..8b31603f4 100644 --- a/website/docs/configuration-0-11/resources.html.md +++ b/website/docs/configuration-0-11/resources.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Resources - 0.11 Configuration Language" sidebar_current: "docs-conf-old-resources" description: |- @@ -10,7 +10,7 @@ description: |- -> **Note:** This page is about Terraform 0.11 and earlier. 
For Terraform 0.12 and later, see -[Configuration Language: Resources](../configuration/resources.html). +[Configuration Language: Resources](/docs/language/resources/index.html). The most important thing you'll configure with Terraform are resources. Resources are a component of your infrastructure. @@ -98,7 +98,7 @@ There are **meta-parameters** available to all resources: Individual Resources may provide a `timeouts` block to enable users to configure the amount of time a specific operation is allowed to take before being considered an error. For example, the -[aws_db_instance](/docs/providers/aws/r/db_instance.html#timeouts) +[aws_db_instance](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/db_instance#timeouts) resource provides configurable timeouts for the `create`, `update`, and `delete` operations. Any Resource that provides Timeouts will document the default values for that operation, and users can overwrite @@ -176,7 +176,7 @@ could automatically do this a better way. Within a resource, you can optionally have a **connection block**. Connection blocks describe to Terraform how to connect to the resource for -[provisioning](/docs/provisioners/index.html). This block doesn't +[provisioning](/docs/language/resources/provisioners/syntax.html). This block doesn't need to be present if you're using only local provisioners, or if you're not provisioning at all. @@ -184,13 +184,13 @@ Resources provide some data on their own, such as an IP address, but other data must be specified by the user. The full list of settings that can be specified are listed on -the [provisioner connection page](/docs/provisioners/connection.html). +the [provisioner connection page](/docs/language/resources/provisioners/connection.html). ### Provisioners Within a resource, you can specify zero or more **provisioner blocks**. Provisioner blocks configure -[provisioners](/docs/provisioners/index.html). 
+[provisioners](/docs/language/resources/provisioners/syntax.html). Within the provisioner block is provisioner-specific configuration, much like resource-specific configuration. @@ -211,7 +211,7 @@ You can use the `${count.index}` [variable](./variables.html) to accomplish this. For example, here's how you could create three [AWS -Instances](/docs/providers/aws/r/instance.html) each with their own +Instances](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance) each with their own static IP address: ```hcl @@ -232,7 +232,7 @@ resource "aws_instance" "app" { To reference a particular instance of a resource you can use `resource.foo.*.id[#]` where `#` is the index number of the instance. -For example, to create a list of all [AWS subnet](/docs/providers/aws/r/subnet.html) ids vs referencing a specific subnet in the list you can use this syntax: +For example, to create a list of all [AWS subnet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/subnet) ids vs referencing a specific subnet in the list you can use this syntax: ```hcl resource "aws_vpc" "foo" { diff --git a/website/docs/configuration-0-11/syntax.html.md b/website/docs/configuration-0-11/syntax.html.md index d438e77a0..8c0e57d31 100644 --- a/website/docs/configuration-0-11/syntax.html.md +++ b/website/docs/configuration-0-11/syntax.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Syntax - 0.11 Configuration Language" sidebar_current: "docs-conf-old-syntax" description: |- @@ -14,7 +14,7 @@ description: |- -> **Note:** This page is about Terraform 0.11 and earlier. For Terraform 0.12 and later, see -[Configuration Language: Syntax](../configuration/syntax.html). +[Configuration Language: Syntax](/docs/language/syntax/configuration.html). The syntax of Terraform configurations is called [HashiCorp Configuration Language (HCL)](https://github.com/hashicorp/hcl). 
It is meant to strike a diff --git a/website/docs/configuration-0-11/terraform-enterprise.html.md b/website/docs/configuration-0-11/terraform-enterprise.html.md index 1b2636887..8c5ee7f38 100644 --- a/website/docs/configuration-0-11/terraform-enterprise.html.md +++ b/website/docs/configuration-0-11/terraform-enterprise.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Terraform Push - 0.11 Configuration Language" sidebar_current: "docs-conf-old-push" description: |- @@ -13,9 +13,9 @@ feature that was removed in Terraform 0.12. ~> **Important:** The `terraform push` command is deprecated, and only works with the legacy version of Terraform Enterprise. In the current version of Terraform Cloud, you can upload configurations using the API. See [the docs about API-driven runs](/docs/cloud/run/api.html) for more details. -The [`terraform push` command](/docs/commands/push.html) uploads a configuration to a Terraform Enterprise (legacy) environment. The name of the environment (and the organization it's in) can be specified on the command line, or as part of the Terraform configuration in an `atlas` block. +The [`terraform push` command](/docs/cli/commands/push.html) uploads a configuration to a Terraform Enterprise (legacy) environment. The name of the environment (and the organization it's in) can be specified on the command line, or as part of the Terraform configuration in an `atlas` block. -The `atlas` block does not configure remote state; it only configures the push command. For remote state, [use a `terraform { backend "" {...} }` block](/docs/backends/config.html). +The `atlas` block does not configure remote state; it only configures the push command. For remote state, use a `terraform { backend "" {...} }` block. This page assumes you're familiar with the [configuration syntax](./syntax.html) @@ -40,7 +40,7 @@ Enterprise**. 
While this transition is in progress, you may see references to ## Description The `atlas` block configures the settings when Terraform is -[pushed](/docs/commands/push.html) to Terraform Enterprise. Only one `atlas` block +[pushed](/docs/cli/commands/push.html) to Terraform Enterprise. Only one `atlas` block is allowed. Within the block (the `{ }`) is configuration for Atlas uploading. @@ -50,7 +50,7 @@ No keys are required, but the key typically set is `name`. to the nature of this configuration, interpolations are not possible. If you want to parameterize these settings, use the Atlas block to set defaults, then use the command-line flags of the -[push command](/docs/commands/push.html) to override. +[push command](/docs/cli/commands/push.html) to override. ## Syntax diff --git a/website/docs/configuration-0-11/terraform.html.md b/website/docs/configuration-0-11/terraform.html.md index ff75cbced..e7911ffee 100644 --- a/website/docs/configuration-0-11/terraform.html.md +++ b/website/docs/configuration-0-11/terraform.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Terraform Settings - 0.11 Configuration Language" sidebar_current: "docs-conf-old-terraform" description: |- @@ -10,7 +10,7 @@ description: |- -> **Note:** This page is about Terraform 0.11 and earlier. For Terraform 0.12 and later, see -[Configuration Language: Terraform Settings](../configuration/terraform.html). +[Configuration Language: Terraform Settings](/docs/language/settings/index.html). The `terraform` configuration section is used to configure Terraform itself, such as requiring a minimum Terraform version to execute a configuration. @@ -41,7 +41,7 @@ that must be met to perform operations on this configuration. If the running Terraform version doesn't meet these constraints, an error is shown. See the section below dedicated to this option. 
-See [backends](/docs/backends/index.html) for more detail on the `backend` +See [backends](/docs/language/settings/backends/configuration.html) for more detail on the `backend` configuration. **No value within the `terraform` block can use interpolations.** The diff --git a/website/docs/configuration-0-11/variables.html.md b/website/docs/configuration-0-11/variables.html.md index 7201d6420..6fbcf5451 100644 --- a/website/docs/configuration-0-11/variables.html.md +++ b/website/docs/configuration-0-11/variables.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Input Variables - 0.11 Configuration Language" sidebar_current: "docs-conf-old-variables" description: |- @@ -11,7 +11,7 @@ description: |- -> **Note:** This page is about Terraform 0.11 and earlier. For Terraform 0.12 and later, see -[Configuration Language: Input Variables](../configuration/variables.html). +[Configuration Language: Input Variables](/docs/language/values/variables.html). Input variables serve as parameters for a Terraform module. @@ -211,7 +211,7 @@ $ TF_VAR_image=foo terraform apply ``` Maps and lists can be specified using environment variables as well using -[HCL](./syntax.html#HCL) syntax in the value. +[HCL](./syntax.html) syntax in the value. For a list variable like so: diff --git a/website/docs/configuration/expressions.html.md b/website/docs/configuration/expressions.html.md index 6b4db6111..9aa59acde 100644 --- a/website/docs/configuration/expressions.html.md +++ b/website/docs/configuration/expressions.html.md @@ -1,918 +1,157 @@ --- -layout: "docs" -page_title: "Expressions - Configuration Language" +layout: "language" +page_title: "Expressions Landing Page - Configuration Language" sidebar_current: "docs-config-expressions" -description: |- - The Terraform language allows the use of expressions to access data exported - by resources and to transform and combine that data to produce other values. 
--- -# Expressions +# Expressions Landing Page --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../configuration-0-11/interpolation.html). +To improve navigation, we've split the old Expressions page into several smaller +pages. -_Expressions_ are used to refer to or compute values within a configuration. -The simplest expressions are just literal values, like `"hello"` or `5`, -but the Terraform language also allows more complex expressions such as -references to data exported by resources, arithmetic, conditional evaluation, -and a number of built-in functions. + + + + + -Expressions can be used in a number of places in the Terraform language, -but some contexts limit which expression constructs are allowed, -such as requiring a literal value of a particular type or forbidding -[references to resource attributes](/docs/configuration/expressions.html#references-to-resource-attributes). -Each language feature's documentation describes any restrictions it places on expressions. +## Types and Values, Literal Expressions, Indices and Attributes -You can experiment with the behavior of Terraform's expressions from -the Terraform expression console, by running -[the `terraform console` command](/docs/commands/console.html). +Terraform's types are `string`, `number`, `bool`, `list`, `tuple`, `map`, +`object`, and `null`. -The rest of this page describes all of the features of Terraform's -expression syntax. +This information has moved to +[Types and Values](/docs/language/expressions/types.html). -## Types and Values +
-The result of an expression is a _value_. All values have a _type_, which -dictates where that value can be used and what transformations can be -applied to it. -The Terraform language uses the following types for its values: -* `string`: a sequence of Unicode characters representing some text, like - `"hello"`. -* `number`: a numeric value. The `number` type can represent both whole - numbers like `15` and fractional values like `6.283185`. -* `bool`: either `true` or `false`. `bool` values can be used in conditional - logic. -* `list` (or `tuple`): a sequence of values, like - `["us-west-1a", "us-west-1c"]`. Elements in a list or tuple are identified by - consecutive whole numbers, starting with zero. -* `map` (or `object`): a group of values identified by named labels, like - `{name = "Mabel", age = 52}`. + + + + + + -Strings, numbers, and bools are sometimes called _primitive types._ Lists/tuples and maps/objects are sometimes called _complex types,_ _structural types,_ or _collection types._ +## References to Named Values (Resource Attributes, Variables, etc.) -Finally, there is one special value that has _no_ type: +You can refer to certain values by name, like `var.some_variable` or +`aws_instance.example.ami`. -* `null`: a value that represents _absence_ or _omission._ If you set an - argument of a resource or module to `null`, Terraform behaves as though you - had completely omitted it — it will use the argument's default value if it has - one, or raise an error if the argument is mandatory. `null` is most useful in - conditional expressions, so you can dynamically omit an argument if a - condition isn't met. +This information has moved to +[References to Values](/docs/language/expressions/references.html). -### Advanced Type Details +
-In most situations, lists and tuples behave identically, as do maps and objects. -Whenever the distinction isn't relevant, the Terraform documentation uses each -pair of terms interchangeably (with a historical preference for "list" and -"map"). -However, module authors and provider developers should understand the -differences between these similar types (and the related `set` type), since they -offer different ways to restrict the allowed values for input variables and -resource arguments. -For complete details about these types (and an explanation of why the difference -usually doesn't matter), see [Type Constraints](./types.html). - -### Type Conversion - -Expressions are most often used to set values for the arguments of resources and -child modules. In these cases, the argument has an expected type and the given -expression must produce a value of that type. - -Where possible, Terraform automatically converts values from one type to -another in order to produce the expected type. If this isn't possible, Terraform -will produce a type mismatch error and you must update the configuration with a -more suitable expression. - -Terraform automatically converts number and bool values to strings when needed. -It also converts strings to numbers or bools, as long as the string contains a -valid representation of a number or bool value. - -* `true` converts to `"true"`, and vice-versa -* `false` converts to `"false"`, and vice-versa -* `15` converts to `"15"`, and vice-versa - -## Literal Expressions - -A _literal expression_ is an expression that directly represents a particular -constant value. Terraform has a literal expression syntax for each of the value -types described above: - -* Strings are usually represented by a double-quoted sequence of Unicode - characters, `"like this"`. There is also a "heredoc" syntax for more complex - strings. 
String literals are the most complex kind of literal expression in - Terraform, and have additional documentation on this page: - * See [String Literals](#string-literals) below for information about escape - sequences and the heredoc syntax. - * See [String Templates](#string-templates) below for information about - interpolation and template directives. -* Numbers are represented by unquoted sequences of digits with or without a - decimal point, like `15` or `6.283185`. -* Bools are represented by the unquoted symbols `true` and `false`. -* The null value is represented by the unquoted symbol `null`. -* Lists/tuples are represented by a pair of square brackets containing a - comma-separated sequence of values, like `["a", 15, true]`. - - List literals can be split into multiple lines for readability, but always - require a comma between values. A comma after the final value is allowed, - but not required. Values in a list can be arbitrary expressions. -* Maps/objects are represented by a pair of curly braces containing a series of - ` = ` pairs: - - ```hcl - { - name = "John" - age = 52 - } - ``` - - Key/value pairs can be separated by either a comma or a line break. Values - can be arbitrary expressions. Keys are strings; they can be left unquoted if - they are a valid [identifier](./syntax.html#identifiers), but must be quoted - otherwise. You can use a non-literal expression as a key by wrapping it in - parentheses, like `(var.business_unit_tag_name) = "SRE"`. - -## Indices and Attributes - -[inpage-index]: #indices-and-attributes - -Elements of list/tuple and map/object values can be accessed using -the square-bracket index notation, like `local.list[3]`. The expression within -the brackets must be a whole number for list and tuple values or a string -for map and object values. - -Map/object attributes with names that are valid identifiers can also be accessed -using the dot-separated attribute notation, like `local.object.attrname`. 
-In cases where a map might contain arbitrary user-specified keys, we recommend -using only the square-bracket index notation (`local.map["keyname"]`). - -## References to Named Values - -Terraform makes several kinds of named values available. Each of these names is -an expression that references the associated value; you can use them as -standalone expressions, or combine them with other expressions to compute new -values. - -The following named values are available: - -* `.` is an object representing a - [managed resource](./resources.html) of the given type - and name. The attributes of the resource can be accessed using - [dot or square bracket notation][inpage-index]. - - Any named value that does not match another pattern listed below - will be interpreted by Terraform as a reference to a managed resource. - - If the resource has the `count` argument set, the value of this expression - is a _list_ of objects representing its instances. - - If the resource has the `for_each` argument set, the value of this expression - is a _map_ of objects representing its instances. - - For more information, see - [references to resource attributes](#references-to-resource-attributes) below. -* `var.` is the value of the - [input variable](./variables.html) of the given name. -* `local.` is the value of the - [local value](./locals.html) of the given name. -* `module..` is the value of the specified - [output value](./outputs.html) from a - [child module](./modules.html) called by the current module. -* `data..` is an object representing a - [data resource](./data-sources.html) of the given data - source type and name. If the resource has the `count` argument set, the value - is a list of objects representing its instances. If the resource has the `for_each` - argument set, the value is a map of objects representing its instances. -* `path.module` is the filesystem path of the module where the expression - is placed. 
-* `path.root` is the filesystem path of the root module of the configuration. -* `path.cwd` is the filesystem path of the current working directory. In - normal use of Terraform this is the same as `path.root`, but some advanced - uses of Terraform run it from a directory other than the root module - directory, causing these paths to be different. -* `terraform.workspace` is the name of the currently selected - [workspace](/docs/state/workspaces.html). - -Although many of these names use dot-separated paths that resemble -[attribute notation][inpage-index] for elements of object values, they are not -implemented as real objects. This means you must use them exactly as written: -you cannot use square-bracket notation to replace the dot-separated paths, and -you cannot iterate over the "parent object" of a named entity (for example, you -cannot use `aws_instance` in a `for` expression). - -### Local Named Values - -Within the bodies of certain expressions, or in some other specific contexts, -there are other named values available beyond the global values listed above. -These local names are described in the documentation for the specific contexts -where they appear. Some of most common local names are: - -- `count.index`, in resources that use - [the `count` meta-argument](./resources.html#count-multiple-resource-instances-by-count). -- `each.key` / `each.value`, in resources that use - [the `for_each` meta-argument](./resources.html#for_each-multiple-resource-instances-defined-by-a-map-or-set-of-strings). -- `self`, in [provisioner](../provisioners/index.html) and - [connection](../provisioners/connection.html) blocks. - --> **Note:** Local names are often referred to as _variables_ or -_temporary variables_ in their documentation. These are not [input -variables](./variables.html); they are just arbitrary names -that temporarily represent a value. 
- -### Named Values and Dependencies - -Constructs like resources and module calls often use references to named values -in their block bodies, and Terraform analyzes these expressions to automatically -infer dependencies between objects. For example, an expression in a resource -argument that refers to another managed resource creates an implicit dependency -between the two resources. - -### References to Resource Attributes - -The most common reference type is a reference to an attribute of a resource -which has been declared either with a `resource` or `data` block. Because -the contents of such blocks can be quite complicated themselves, expressions -referring to these contents can also be complicated. - -Consider the following example resource block: - -```hcl -resource "aws_instance" "example" { - ami = "ami-abc123" - instance_type = "t2.micro" - - ebs_block_device { - device_name = "sda2" - volume_size = 16 - } - ebs_block_device { - device_name = "sda3" - volume_size = 20 - } -} -``` - -The documentation for [`aws_instance`](/docs/providers/aws/r/instance.html) -lists all of the arguments and nested blocks supported for this resource type, -and also lists a number of attributes that are _exported_ by this resource -type. All of these different resource type schema constructs are available -for use in references, as follows: - -* The `ami` argument set in the configuration can be used elsewhere with - the reference expression `aws_instance.example.ami`. -* The `id` attribute exported by this resource type can be read using the - same syntax, giving `aws_instance.example.id`. -* The arguments of the `ebs_block_device` nested blocks can be accessed using - a [splat expression](#splat-expressions). For example, to obtain a list of - all of the `device_name` values, use - `aws_instance.example.ebs_block_device[*].device_name`. 
-* The nested blocks in this particular resource type do not have any exported - attributes, but if `ebs_block_device` were to have a documented `id` - attribute then a list of them could be accessed similarly as - `aws_instance.example.ebs_block_device[*].id`. -* Sometimes nested blocks are defined as taking a logical key to identify each - block, which serves a similar purpose as the resource's own name by providing - a convenient way to refer to that single block in expressions. If `aws_instance` - had a hypothetical nested block type `device` that accepted such a key, it - would look like this in configuration: - - ```hcl - device "foo" { - size = 2 - } - device "bar" { - size = 4 - } - ``` - - Arguments inside blocks with _keys_ can be accessed using index syntax, such - as `aws_instance.example.device["foo"].size`. - - To obtain a map of values of a particular argument for _labelled_ nested - block types, use a [`for` expression](#for-expressions): - `{for k, device in aws_instance.example.device : k => device.size}`. - -When a resource has the -[`count`](https://www.terraform.io/docs/configuration/resources.html#count-multiple-resource-instances-by-count) -argument set, the resource itself becomes a _list_ of instance objects rather than -a single object. In that case, access the attributes of the instances using -either [splat expressions](#splat-expressions) or index syntax: - -* `aws_instance.example[*].id` returns a list of all of the ids of each of the - instances. -* `aws_instance.example[0].id` returns just the id of the first instance. - -When a resource has the -[`for_each`](/docs/configuration/resources.html#for_each-multiple-resource-instances-defined-by-a-map-or-set-of-strings) -argument set, the resource itself becomes a _map_ of instance objects rather than -a single object, and attributes of instances must be specified by key, or can -be accessed using a [`for` expression](#for-expressions). 
- -* `aws_instance.example["a"].id` returns the id of the "a"-keyed resource. -* `[for value in aws_instance.example: value.id]` returns a list of all of the ids - of each of the instances. - -Note that unlike `count`, splat expressions are _not_ directly applicable to resources managed with `for_each`, as splat expressions are for lists only. You may apply a splat expression to values in a map like so: - -* `values(aws_instance.example)[*].id` - -### Local Named Values - -Within the bodies of certain expressions, or in some other specific contexts, -there are other named values available beyond the global values listed above. -(For example, the body of a resource block where `count` is set can use a -special `count.index` value.) These local names are described in the -documentation for the specific contexts where they appear. - --> **Note:** Local named values are often referred to as _variables_ or -_temporary variables_ in their documentation. These are not [input -variables](./variables.html); they are just arbitrary names -that temporarily represent a value. - -### Values Not Yet Known - -When Terraform is planning a set of changes that will apply your configuration, -some resource attribute values cannot be populated immediately because their -values are decided dynamically by the remote system. For example, if a -particular remote object type is assigned a generated unique id on creation, -Terraform cannot predict the value of this id until the object has been created. - -To allow expressions to still be evaluated during the plan phase, Terraform -uses special "unknown value" placeholders for these results. In most cases you -don't need to do anything special to deal with these, since the Terraform -language automatically handles unknown values during expressions, so that -for example adding a known value to an unknown value automatically produces -an unknown value as the result. 
- -However, there are some situations where unknown values _do_ have a significant -effect: - -* The `count` meta-argument for resources cannot be unknown, since it must - be evaluated during the plan phase to determine how many instances are to - be created. - -* If unknown values are used in the configuration of a data resource, that - data resource cannot be read during the plan phase and so it will be deferred - until the apply phase. In this case, the results of the data resource will - _also_ be unknown values. - -* If an unknown value is assigned to an argument inside a `module` block, - any references to the corresponding input variable within the child module - will use that unknown value. - -* If an unknown value is used in the `value` argument of an output value, - any references to that output value in the parent module will use that - unknown value. - -* Terraform will attempt to validate that unknown values are of suitable - types where possible, but incorrect use of such values may not be detected - until the apply phase, causing the apply to fail. - -Unknown values appear in the `terraform plan` output as `(not yet known)`. + + + + ## Arithmetic and Logical Operators -An _operator_ is a type of expression that transforms or combines one or more -other expressions. Operators either combine two values in some way to -produce a third result value, or transform a single given value to -produce a single result. +Operators are expressions that transform other expressions, like adding two +numbers (`+`) or comparing two values to get a bool (`==`, `>=`, etc.). -Operators that work on two values place an operator symbol between the two -values, similar to mathematical notation: `1 + 2`. Operators that work on -only one value place an operator symbol before that value, like -`!true`. +This information has moved to +[Operators](/docs/language/expressions/references.html). 
-The Terraform language has a set of operators for both arithmetic and logic, -which are similar to operators in programming languages such as JavaScript -or Ruby. +
-When multiple operators are used together in an expression, they are evaluated -in the following order of operations: -1. `!`, `-` (multiplication by `-1`) -1. `*`, `/`, `%` -1. `+`, `-` (subtraction) -1. `>`, `>=`, `<`, `<=` -1. `==`, `!=` -1. `&&` -1. `||` - -Parentheses can be used to override the default order of operations. Without -parentheses, higher levels are evaluated first, so `1 + 2 * 3` is interpreted -as `1 + (2 * 3)` and _not_ as `(1 + 2) * 3`. - -The different operators can be gathered into a few different groups with -similar behavior, as described below. Each group of operators expects its -given values to be of a particular type. Terraform will attempt to convert -values to the required type automatically, or will produce an error message -if this automatic conversion is not possible. - -### Arithmetic Operators - -The arithmetic operators all expect number values and produce number values -as results: - -* `a + b` returns the result of adding `a` and `b` together. -* `a - b` returns the result of subtracting `b` from `a`. -* `a * b` returns the result of multiplying `a` and `b`. -* `a / b` returns the result of dividing `a` by `b`. -* `a % b` returns the remainder of dividing `a` by `b`. This operator is - generally useful only when used with whole numbers. -* `-a` returns the result of multiplying `a` by `-1`. - -### Equality Operators - -The equality operators both take two values of any type and produce boolean -values as results. - -* `a == b` returns `true` if `a` and `b` both have the same type and the same - value, or `false` otherwise. -* `a != b` is the opposite of `a == b`. - -### Comparison Operators - -The comparison operators all expect number values and produce boolean values -as results. - -* `a < b` returns `true` if `a` is less than `b`, or `false` otherwise. -* `a <= b` returns `true` if `a` is less than or equal to `b`, or `false` - otherwise. -* `a > b` returns `true` if `a` is greater than `b`, or `false` otherwise. 
-* `a >= b` returns `true` if `a` is greater than or equal to `b`, or `false` otherwise. - -### Logical Operators - -The logical operators all expect bool values and produce bool values as results. - -* `a || b` returns `true` if either `a` or `b` is `true`, or `false` if both are `false`. -* `a && b` returns `true` if both `a` and `b` are `true`, or `false` if either one is `false`. -* `!a` returns `true` if `a` is `false`, and `false` if `a` is `true`. ## Conditional Expressions -A _conditional expression_ uses the value of a bool expression to select one of -two values. +The `condition ? true_val : false_val` expression chooses between two +expressions based on a bool condition. -The syntax of a conditional expression is as follows: +This information has moved to +[Conditional Expressions](/docs/language/expressions/conditionals.html). -```hcl -condition ? true_val : false_val -``` +
-If `condition` is `true` then the result is `true_val`. If `condition` is -`false` then the result is `false_val`. -A common use of conditional expressions is to define defaults to replace -invalid values: -``` -var.a != "" ? var.a : "default-a" -``` - -If `var.a` is an empty string then the result is `"default-a"`, but otherwise -it is the actual value of `var.a`. - -Any of the equality, comparison, and logical operators can be used to define -the condition. The two result values may be of any type, but they must both -be of the _same_ type so that Terraform can determine what type the whole -conditional expression will return without knowing the condition value. + + ## Function Calls -The Terraform language has a number of -[built-in functions](./functions.html) that can be used -within expressions as another way to transform and combine values. These -are similar to the operators but all follow a common syntax: +Terraform's functions can be called like `function_name(arg1, arg2)`. -```hcl -(, ) -``` +This information has moved to +[Function Calls](/docs/language/expressions/function-calls.html). -The function name specifies which function to call. Each defined function -expects a specific number of arguments with specific value types, and returns a -specific value type as a result. +
-Some functions take an arbitrary number of arguments. For example, the `min` -function takes any amount of number arguments and returns the one that is -numerically smallest: -```hcl -min(55, 3453, 2) -``` -### Expanding Function Arguments - -If the arguments to pass to a function are available in a list or tuple value, -that value can be _expanded_ into separate arguments. Provide the list value as -an argument and follow it with the `...` symbol: - -```hcl -min([55, 2453, 2]...) -``` - -The expansion symbol is three periods (`...`), not a Unicode ellipsis character -(`…`). Expansion is a special syntax that is only available in function calls. - -### Available Functions - -For a full list of available functions, see -[the function reference](./functions.html). + ## `for` Expressions -A _`for` expression_ creates a complex type value by transforming -another complex type value. Each element in the input value -can correspond to either one or zero values in the result, and an arbitrary -expression can be used to transform each input element into an output element. +Expressions like `[for s in var.list : upper(s)]` can transform a complex type +value into another complex type value. -For example, if `var.list` is a list of strings, then the following expression -produces a list of strings with all-uppercase letters: +This information has moved to +[For Expressions](/docs/language/expressions/for.html). -```hcl -[for s in var.list : upper(s)] -``` +
-This `for` expression iterates over each element of `var.list`, and then -evaluates the expression `upper(s)` with `s` set to each respective element. -It then builds a new tuple value with all of the results of executing that -expression in the same order. -The type of brackets around the `for` expression decide what type of result -it produces. The above example uses `[` and `]`, which produces a tuple. If -`{` and `}` are used instead, the result is an object, and two result -expressions must be provided separated by the `=>` symbol: -```hcl -{for s in var.list : s => upper(s)} -``` - -This expression produces an object whose attributes are the original elements -from `var.list` and their corresponding values are the uppercase versions. - -A `for` expression can also include an optional `if` clause to filter elements -from the source collection, which can produce a value with fewer elements than -the source: - -``` -[for s in var.list : upper(s) if s != ""] -``` - -The source value can also be an object or map value, in which case two -temporary variable names can be provided to access the keys and values -respectively: - -``` -[for k, v in var.map : length(k) + length(v)] -``` - -Finally, if the result type is an object (using `{` and `}` delimiters) then -the value result expression can be followed by the `...` symbol to group -together results that have a common key: - -``` -{for s in var.list : substr(s, 0, 1) => s... if s != ""} -``` - -For expressions are particularly useful when combined with other language -features to combine collections together in various ways. 
For example, -the following two patterns are commonly used when constructing map values -to use with [resource `for_each`](./resources.html#for_each-multiple-resource-instances-defined-by-a-map-or-set-of-strings): - -* Transform a multi-level nested structure into a flat list by - [using nested `for` expressions with the `flatten` function](./functions/flatten.html#flattening-nested-structures-for-for_each). -* Produce an exhaustive list of combinations of elements from two or more - collections by - [using the `setproduct` function inside a `for` expression](./functions/setproduct.html#finding-combinations-for-for_each). + + ## Splat Expressions -A _splat expression_ provides a more concise way to express a common -operation that could otherwise be performed with a `for` expression. +Expressions like `var.list[*].id` can extract simpler collections from complex +collections. -If `var.list` is a list of objects that all have an attribute `id`, then -a list of the ids could be produced with the following `for` expression: +This information has moved to +[Splat Expressions](/docs/language/expressions/splat.html). -```hcl -[for o in var.list : o.id] -``` +
-This is equivalent to the following _splat expression:_ -```hcl -var.list[*].id -``` -The special `[*]` symbol iterates over all of the elements of the list given -to its left and accesses from each one the attribute name given on its -right. A splat expression can also be used to access attributes and indexes -from lists of complex types by extending the sequence of operations to the -right of the symbol: + + -```hcl -var.list[*].interfaces[0].name -``` +## `dynamic` Blocks -The above expression is equivalent to the following `for` expression: +The special `dynamic` block type serves the same purpose as a `for` expression, +except it creates multiple repeatable nested blocks instead of a complex value. -```hcl -[for o in var.list : o.interfaces[0].name] -``` +This information has moved to +[Dynamic Blocks](/docs/language/expressions/dynamic-blocks.html). -Splat expressions are for lists only (and thus cannot be used [to reference resources -created with `for_each`](/docs/configuration/resources.html#referring-to-instances-1), -which are represented as maps in Terraform). However, if a splat expression is applied -to a value that is _not_ a list or tuple then the value is automatically wrapped in -a single-element list before processing. +
-For example, `var.single_object[*].id` is equivalent to `[var.single_object][*].id`, -or effectively `[var.single_object.id]`. This behavior is not interesting in most cases, -but it is particularly useful when referring to resources that may or may -not have `count` set, and thus may or may not produce a tuple value: -```hcl -aws_instance.example[*].id -``` -The above will produce a list of ids whether `aws_instance.example` has -`count` set or not, avoiding the need to revise various other expressions -in the configuration when a particular resource switches to and from -having `count` set. + + + + -### Legacy (Attribute-only) Splat Expressions +## String Literals and String Templates -An older variant of the splat expression is available for compatibility with -code written in older versions of the Terraform language. This is a less useful -version of the splat expression, and should be avoided in new configurations. - -An "attribute-only" splat expression is indicated by the sequence `.*` (instead -of `[*]`): - -``` -var.list.*.interfaces[0].name -``` - -This form has a subtly different behavior, equivalent to the following -`for` expression: - -``` -[for o in var.list : o.interfaces][0].name -``` - -Notice that with the attribute-only splat expression the index operation -`[0]` is applied to the result of the iteration, rather than as part of -the iteration itself. - -## `dynamic` blocks - -Within top-level block constructs like resources, expressions can usually be -used only when assigning a value to an argument using the `name = expression` -form. 
This covers many uses, but some resource types include repeatable _nested -blocks_ in their arguments, which do not accept expressions: - -```hcl -resource "aws_elastic_beanstalk_environment" "tfenvtest" { - name = "tf-test-name" # can use expressions here - - setting { - # but the "setting" block is always a literal block - } -} -``` - -You can dynamically construct repeatable nested blocks like `setting` using a -special `dynamic` block type, which is supported inside `resource`, `data`, -`provider`, and `provisioner` blocks: - -```hcl -resource "aws_elastic_beanstalk_environment" "tfenvtest" { - name = "tf-test-name" - application = "${aws_elastic_beanstalk_application.tftest.name}" - solution_stack_name = "64bit Amazon Linux 2018.03 v2.11.4 running Go 1.12.6" - - dynamic "setting" { - for_each = var.settings - content { - namespace = setting.value["namespace"] - name = setting.value["name"] - value = setting.value["value"] - } - } -} -``` - -A `dynamic` block acts much like a `for` expression, but produces nested blocks -instead of a complex typed value. It iterates over a given complex value, and -generates a nested block for each element of that complex value. - -- The label of the dynamic block (`"setting"` in the example above) specifies - what kind of nested block to generate. -- The `for_each` argument provides the complex value to iterate over. -- The `iterator` argument (optional) sets the name of a temporary variable - that represents the current element of the complex value. If omitted, the name - of the variable defaults to the label of the `dynamic` block (`"setting"` in - the example above). -- The `labels` argument (optional) is a list of strings that specifies the block - labels, in order, to use for each generated block. You can use the temporary - iterator variable in this value. -- The nested `content` block defines the body of each generated block. You can - use the temporary iterator variable inside this block. 
- -Since the `for_each` argument accepts any collection or structural value, -you can use a `for` expression or splat expression to transform an existing -collection. - -The iterator object (`setting` in the example above) has two attributes: - -* `key` is the map key or list element index for the current element. If the - `for_each` expression produces a _set_ value then `key` is identical to - `value` and should not be used. -* `value` is the value of the current element. - -A `dynamic` block can only generate arguments that belong to the resource type, -data source, provider or provisioner being configured. It is _not_ possible -to generate meta-argument blocks such as `lifecycle` and `provisioner` -blocks, since Terraform must process these before it is safe to evaluate -expressions. - -The `for_each` value must be a map or set with one element per desired -nested block. If you need to declare resource instances based on a nested -data structure or combinations of elements from multiple data structures you -can use Terraform expressions and functions to derive a suitable value. -For some common examples of such situations, see the -[`flatten`](/docs/configuration/functions/flatten.html) -and -[`setproduct`](/docs/configuration/functions/setproduct.html) -functions. - -### Best Practices for `dynamic` Blocks - -Overuse of `dynamic` blocks can make configuration hard to read and maintain, so -we recommend using them only when you need to hide details in order to build a -clean user interface for a re-usable module. Always write nested blocks out -literally where possible. - -## String Literals - -The Terraform language has two different syntaxes for string literals. The -most common is to delimit the string with quote characters (`"`), like -`"hello"`. 
In quoted strings, the backslash character serves as an escape -sequence, with the following characters selecting the escape behavior: - -| Sequence | Replacement | -| ------------ | ----------------------------------------------------------------------------- | -| `\n` | Newline | -| `\r` | Carriage Return | -| `\t` | Tab | -| `\"` | Literal quote (without terminating the string) | -| `\\` | Literal backslash | -| `\uNNNN` | Unicode character from the basic multilingual plane (NNNN is four hex digits) | -| `\UNNNNNNNN` | Unicode character from supplementary planes (NNNNNNNN is eight hex digits) | - -The alternative syntax for string literals is the so-called "heredoc" style, -inspired by Unix shell languages. This style allows multi-line strings to -be expressed more clearly by using a custom delimiter word on a line of its -own to close the string: +Strings can be `"double-quoted"` or ```hcl <`/`else`/`endif` directive chooses between two templates based - on the value of a bool expression: - - ```hcl - "Hello, %{ if var.name != "" }${var.name}%{ else }unnamed%{ endif }!" - ``` - - The `else` portion may be omitted, in which case the result is an empty - string if the condition expression returns `false`. - -* The `for in ` / `endfor` directive iterates over the - elements of a given collection or structural value and evaluates a given - template once for each element, concatenating the results together: - - ```hcl - < diff --git a/website/docs/configuration/functions/list.html.md b/website/docs/configuration/functions/list.html.md deleted file mode 100644 index 0313bac13..000000000 --- a/website/docs/configuration/functions/list.html.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -layout: "functions" -page_title: "list - Functions - Configuration Language" -sidebar_current: "docs-funcs-collection-list" -description: |- - The list function constructs a list from some given elements. ---- - -# `list` Function - --> **Note:** This page is about Terraform 0.12 and later. 
For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - -~> **This function is deprecated.** From Terraform v0.12, the Terraform -language has built-in syntax for creating lists using the `[` and `]` -delimiters. Use the built-in syntax instead. The `list` function will be -removed in a future version of Terraform. - -`list` takes an arbitrary number of arguments and returns a list containing -those values in the same order. - -## Examples - -``` -> list("a", "b", "c") -[ - "a", - "b", - "c", -] -``` - -Do not use the above form in Terraform v0.12 or above. Instead, use the -built-in list construction syntax, which achieves the same result: - -``` -> ["a", "b", "c"] -[ - "a", - "b", - "c", -] -``` - -## Related Functions - -* [`tolist`](./tolist.html) converts a set value to a list. diff --git a/website/docs/configuration/functions/map.html.md b/website/docs/configuration/functions/map.html.md deleted file mode 100644 index 4735b8788..000000000 --- a/website/docs/configuration/functions/map.html.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -layout: "functions" -page_title: "map - Functions - Configuration Language" -sidebar_current: "docs-funcs-collection-map" -description: |- - The map function constructs a map from some given elements. ---- - -# `map` Function - --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - -~> **This function is deprecated.** From Terraform v0.12, the Terraform -language has built-in syntax for creating maps using the `{` and `}` -delimiters. Use the built-in syntax instead. The `map` function will be -removed in a future version of Terraform. - -`map` takes an even number of arguments and returns a map whose elements -are constructed from consecutive pairs of arguments. 
- -## Examples - -``` -> map("a", "b", "c", "d") -{ - "a" = "b" - "c" = "d" -} -``` - -Do not use the above form in Terraform v0.12 or above. Instead, use the -built-in map construction syntax, which achieves the same result: - -``` -> {"a" = "b", "c" = "d"} -{ - "a" = "b" - "c" = "d" -} -``` - -## Related Functions - -* [`tomap`](./tomap.html) performs a type conversion to a map type. diff --git a/website/docs/configuration/index.html.md b/website/docs/configuration/index.html.md deleted file mode 100644 index 7cae418d0..000000000 --- a/website/docs/configuration/index.html.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -layout: "docs" -page_title: "Configuration Language" -sidebar_current: "docs-config-index" -description: |- - Terraform uses text files to describe infrastructure and to set variables. - These text files are called Terraform _configurations_ and are - written in the Terraform language. ---- - -# Configuration Language - --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language](../configuration-0-11/index.html). - -> **Hands-on:** Try the [Terraform: Get Started](https://learn.hashicorp.com/collections/terraform/aws-get-started?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn. - -Terraform uses its own configuration language, designed to allow concise -descriptions of infrastructure. The Terraform language is declarative, -describing an intended goal rather than the steps to reach that goal. - -## Resources and Modules - -The main purpose of the Terraform language is declaring [resources](./resources.html). -All other language features exist only to make the definition of resources -more flexible and convenient. - -A group of resources can be gathered into a [module](./modules.html), -which creates a larger unit of configuration. 
A resource describes a single -infrastructure object, while a module might describe a set of objects and the -necessary relationships between them in order to create a higher-level system. - -A _Terraform configuration_ consists of a _root module_, where evaluation -begins, along with a tree of child modules created when one module calls -another. - -## Arguments, Blocks, and Expressions - -The syntax of the Terraform language consists of only a few basic elements: - -```hcl -resource "aws_vpc" "main" { - cidr_block = var.base_cidr_block -} - - "" "" { - # Block body - = # Argument -} -``` - -- _Blocks_ are containers for other content and usually represent the - configuration of some kind of object, like a resource. Blocks have a - _block type,_ can have zero or more _labels,_ and have a _body_ that contains - any number of arguments and nested blocks. Most of Terraform's features are - controlled by top-level blocks in a configuration file. -- _Arguments_ assign a value to a name. They appear within blocks. -- _Expressions_ represent a value, either literally or by referencing and - combining other values. They appear as values for arguments, or within other - expressions. - -For full details about Terraform's syntax, see: - -- [Configuration Syntax](./syntax.html) -- [Expressions](./expressions.html) - -## Code Organization - -The Terraform language uses configuration files that are named with the `.tf` -file extension. There is also [a JSON-based variant of the language](./syntax-json.html) -that is named with the `.tf.json` file extension. - -Configuration files must always use UTF-8 encoding, and by convention are -usually maintained with Unix-style line endings (LF) rather than Windows-style -line endings (CRLF), though both are accepted. - -A _module_ is a collection of `.tf` or `.tf.json` files kept together in a -directory. 
The root module is built from the configuration files in the -current working directory when Terraform is run, and this module may reference -child modules in other directories, which can in turn reference other modules, -etc. - -The simplest Terraform configuration is a single root module containing only -a single `.tf` file. A configuration can grow gradually as more resources -are added, either by creating new configuration files within the root module -or by organizing sets of resources into child modules. - -## Configuration Ordering - -Because Terraform's configuration language is declarative, the ordering of -blocks is generally not significant. (The order of `provisioner` blocks within a -resource is the only major feature where block order matters.) - -Terraform automatically processes resources in the correct order based on -relationships defined between them in configuration, and so you can organize -resources into source files in whatever way makes sense for your infrastructure. - -## Terraform CLI vs. Providers - -The Terraform command line interface (CLI) is a general engine for evaluating -and applying Terraform configurations. It defines the Terraform language syntax -and overall structure, and coordinates sequences of changes that must be made to -make remote infrastructure match the given configuration. - -This general engine has no knowledge about specific types of infrastructure -objects. Instead, Terraform uses plugins called -[providers](./providers.html) that each define and manage a -set of resource types. Most providers are associated with a particular cloud or -on-premises infrastructure service, allowing Terraform to manage infrastructure -objects within that service. - -Terraform doesn't have a concept of platform-independent resource types -— resources are always tied to a provider, since the features of similar -resources can vary greatly from provider to provider. 
But Terraform CLI's shared -configuration engine ensures that the same language constructs and syntax are -available across all services and allows resource types from different services -to be combined as needed. - -## Example - -The following simple example describes a simple network topology for Amazon Web -Services, just to give a sense of the overall structure and syntax of the -Terraform language. Similar configurations can be created for other virtual -network services, using resource types defined by other providers, and a -practical network configuration will often contain additional elements not -shown here. - -```hcl -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 1.0.4" - } - } -} - -variable "aws_region" {} - -variable "base_cidr_block" { - description = "A /16 CIDR range definition, such as 10.1.0.0/16, that the VPC will use" - default = "10.1.0.0/16" -} - -variable "availability_zones" { - description = "A list of availability zones in which to create subnets" - type = list(string) -} - -provider "aws" { - region = var.aws_region -} - -resource "aws_vpc" "main" { - # Referencing the base_cidr_block variable allows the network address - # to be changed without modifying the configuration. - cidr_block = var.base_cidr_block -} - -resource "aws_subnet" "az" { - # Create one subnet for each given availability zone. - count = length(var.availability_zones) - - # For each subnet, use one of the specified availability zones. - availability_zone = var.availability_zones[count.index] - - # By referencing the aws_vpc.main object, Terraform knows that the subnet - # must be created only after the VPC is created. - vpc_id = aws_vpc.main.id - - # Built-in functions and operators can be used for simple transformations of - # values, such as computing a subnet address. Here we create a /20 prefix for - # each subnet, using consecutive addresses for each availability zone, - # such as 10.1.16.0/20 . 
- cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 4, count.index+1) -} -``` - -For more information on the configuration elements shown here, use the -site navigation to explore the Terraform language documentation sub-sections. -To start, see [_Resource Configuration_](./resources.html). diff --git a/website/docs/configuration/modules.html.md b/website/docs/configuration/modules.html.md index 88430218e..1dbcf06a1 100644 --- a/website/docs/configuration/modules.html.md +++ b/website/docs/configuration/modules.html.md @@ -1,632 +1,54 @@ --- -layout: "docs" -page_title: "Modules - Configuration Language" -sidebar_current: "docs-config-modules" -description: |- - Modules allow multiple resources to be grouped together and encapsulated. +layout: "language" +page_title: "Modules Landing Page - Configuration Language" --- -# Modules +# Modules Landing Page --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Modules](../configuration-0-11/modules.html). +To improve navigation, we've split the old Modules page into several smaller +pages. -> **Hands-on:** Try the [Reuse Configuration with Modules](https://learn.hashicorp.com/collections/terraform/modules?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn. + + + + + + -A _module_ is a container for multiple resources that are used together. +## Syntax and Elements of Module Blocks -Every Terraform configuration has at least one module, known as its -_root module_, which consists of the resources defined in the `.tf` files in -the main working directory. +This information has moved to +[Module Blocks](/docs/language/modules/syntax.html). -A module can call other modules, which lets you include the child module's -resources into the configuration in a concise way. 
Modules -can also be called multiple times, either within the same configuration or -in separate configurations, allowing resource configurations to be packaged -and re-used. +
-This page describes how to call one module from another. Other pages in this -section of the documentation describe the different elements that make up -modules, and there is further information about how modules can be used, -created, and published in [the dedicated _Modules_ -section](/docs/modules/index.html). -## Calling a Child Module -To _call_ a module means to include the contents of that module into the -configuration with specific values for its -[input variables](./variables.html). Modules are called -from within other modules using `module` blocks: + -```hcl -module "servers" { - source = "./app-cluster" +## Multiple Instances with `count` and `for_each` - servers = 5 -} -``` +This information has moved to +[`count`](/docs/language/meta-arguments/count.html) and +[`for_each`](/docs/language/meta-arguments/for_each.html). -A module that includes a `module` block like this is the _calling module_ of the -child module. +
-The label immediately after the `module` keyword is a local name, which the -calling module can use to refer to this instance of the module. -Within the block body (between `{` and `}`) are the arguments for the module. -Most of the arguments correspond to [input variables](./variables.html) -defined by the module, including the `servers` argument in the above example. -Terraform also defines a few meta-arguments that are reserved by Terraform -and used for its own purposes; we will discuss those throughout the rest of -this section. -All modules require a `source` argument, which is a meta-argument defined by -Terraform. Its value is either the path to a local directory containing the -module's configuration files, or a remote module source that Terraform should -download and use. This value must be a literal string with no template -sequences; arbitrary expressions are not allowed. For more information on -possible values for this argument, see [Module Sources](/docs/modules/sources.html). + + + + + + -The same source address can be specified in multiple `module` blocks to create -multiple copies of the resources defined within, possibly with different -variable values. +## Handling Provider Configurations in Re-usable Modules -After adding, removing, or modifying `module` blocks, you must re-run -`terraform init` to allow Terraform the opportunity to adjust the installed -modules. By default this command will not upgrade an already-installed module; -use the `-upgrade` option to instead upgrade to the newest available version. +This information has moved to +[The `providers` Meta-Argument](/docs/language/meta-arguments/module-providers.html) +(for users of re-usable modules) and +[Providers Within Modules](/docs/language/modules/develop/providers.html) +(for module developers). -## Accessing Module Output Values - -The resources defined in a module are encapsulated, so the calling module -cannot access their attributes directly. 
However, the child module can -declare [output values](./outputs.html) to selectively -export certain values to be accessed by the calling module. - -For example, if the `./app-cluster` module referenced in the example above -exported an output value named `instance_ids` then the calling module -can reference that result using the expression `module.servers.instance_ids`: - -```hcl -resource "aws_elb" "example" { - # ... - - instances = module.servers.instance_ids -} -``` - -For more information about referring to named values, see -[Expressions](./expressions.html). - -## Transferring Resource State Into Modules - -When refactoring an existing configuration to split code into child modules, -moving resource blocks between modules causes Terraform to see the new location -as an entirely different resource from the old. Always check the execution plan -after moving code across modules to ensure that no resources are deleted by -surprise. - -If you want to make sure an existing resource is preserved, use -[the `terraform state mv` command](/docs/commands/state/mv.html) to inform -Terraform that it has moved to a different module. - -When passing resource addresses to `terraform state mv`, resources within child -modules must be prefixed with `module..`. If a module was called -with `count` or `for_each` ([see below][inpage-multiple]), its resource -addresses must be prefixed with `module.[].` instead, where -`` matches the `count.index` or `each.key` value of a particular module -instance. - -Full resource addresses for module contents are used within the UI and on the -command line, but cannot be used within a Terraform configuration. Only -[outputs](./outputs.html) from a module can be referenced from -elsewhere in your configuration. 
- -## Other Meta-arguments - -Along with the `source` meta-argument described above, module blocks have -some optional meta-arguments that have special meaning across all modules, -described in more detail below: - -- `version` - A [version constraint string](./version-constraints.html) - that specifies acceptable versions of the module. Described in detail under - [Module Versions][inpage-versions] below. - -- `count` and `for_each` - Both of these arguments create multiple instances of a - module from a single `module` block. Described in detail under - [Multiple Instances of a Module][inpage-multiple] below. - -- `providers` - A map whose keys are provider configuration names - that are expected by child module and whose values are the corresponding - provider configurations in the calling module. This allows - [provider configurations to be passed explicitly to child modules](#passing-providers-explicitly). - If not specified, the child module inherits all of the default (un-aliased) - provider configurations from the calling module. Described in detail under - [Providers Within Modules][inpage-providers] - -- `depends_on` - Creates explicit dependencies between the entire - module and the listed targets. This will delay the final evaluation of the - module, and any sub-modules, until after the dependencies have been applied. - Modules have the same dependency resolution behavior - [as defined for managed resources](./resources.html#resource-dependencies). - -In addition to the above, the `lifecycle` argument is not currently used by -Terraform but is reserved for planned future features. - -Since modules are a complex feature in their own right, further detail -about how modules can be used, created, and published is included in -[the dedicated section on modules](/docs/modules/index.html). 
- -## Module Versions - -[inpage-versions]: #module-versions - -When using modules installed from a module registry, we recommend explicitly -constraining the acceptable version numbers to avoid unexpected or unwanted -changes. - -Use the `version` attribute in the `module` block to specify versions: - -```shell -module "consul" { - source = "hashicorp/consul/aws" - version = "0.0.5" - - servers = 3 -} -``` - -The `version` attribute accepts a [version constraint string](./version-constraints.html). -Terraform will use the newest installed version of the module that meets the -constraint; if no acceptable versions are installed, it will download the newest -version that meets the constraint. - -Version constraints are supported only for modules installed from a module -registry, such as the public [Terraform Registry](https://registry.terraform.io/) -or [Terraform Cloud's private module registry](/docs/cloud/registry/index.html). -Other module sources can provide their own versioning mechanisms within the -source string itself, or might not support versions at all. In particular, -modules sourced from local file paths do not support `version`; since -they're loaded from the same source repository, they always share the same -version as their caller. - -## Multiple Instances of a Module - -[inpage-multiple]: #multiple-instances-of-a-module - --> **Note:** Module support for the `for_each` and `count` meta-arguments was -added in Terraform 0.13. Previous versions can only use these arguments with -individual resources. - -Use the `for_each` or the `count` argument to create multiple instances of a -module from a single `module` block. These arguments have the same syntax and -type constraints as -[`for_each`](./resources.html#for_each-multiple-resource-instances-defined-by-a-map-or-set-of-strings) -and -[`count`](./resources.html#count-multiple-resource-instances-by-count) -when used with resources. 
- -```hcl -# my_buckets.tf -module "bucket" { - for_each = toset(["assets", "media"]) - source = "./publish_bucket" - name = "${each.key}_bucket" -} -``` - -```hcl -# publish_bucket/bucket-and-cloudfront.tf -variable "name" {} # this is the input parameter of the module - -resource "aws_s3_bucket" "example" { - # Because var.name includes each.key in the calling - # module block, its value will be different for - # each instance of this module. - bucket = var.name - - # ... -} - -resource "aws_iam_user" "deploy_user" { - # ... -} -``` - -This example defines a local child module in the `./publish_bucket` -subdirectory. That module has configuration to create an S3 bucket. The module -wraps the bucket and all the other implementation details required to configure -a bucket. - -We declare multiple module instances by using the `for_each` attribute, -which accepts a map (with string keys) or a set of strings as its value. Additionally, -we use the special `each.key` value in our module block, because the -[`each`](/docs/configuration/resources.html#the-each-object) object is available when -we have declared `for_each` on the module block. When using the `count` argument, the -[`count`](/docs/configuration/resources.html#the-count-object) object is available. - -Resources from child modules are prefixed with `module.module_name[module index]` -when displayed in plan output and elsewhere in the UI. For a module without -`count` or `for_each`, the address will not contain the module index as the module's -name suffices to reference the module. - -In our example, the `./publish_bucket` module contains `aws_s3_bucket.example`, and so the two -instances of this module produce S3 bucket resources with [resource addresses](/docs/internals/resource-addressing.html) of `module.bucket["assets"].aws_s3_bucket.example` -and `module.bucket["media"].aws_s3_bucket.example` respectively. 
- -## Providers Within Modules - -[inpage-providers]: #providers-within-modules - -In a configuration with multiple modules, there are some special considerations -for how resources are associated with provider configurations. - -Each resource in the configuration must be associated with one provider -configuration. Provider configurations, unlike most other concepts in -Terraform, are global to an entire Terraform configuration and can be shared -across module boundaries. Provider configurations can be defined only in a -root Terraform module. - -Providers can be passed down to descendent modules in two ways: either -_implicitly_ through inheritance, or _explicitly_ via the `providers` argument -within a `module` block. These two options are discussed in more detail in the -following sections. - -A module intended to be called by one or more other modules must not contain -any `provider` blocks, with the exception of the special -"proxy provider blocks" discussed under -_[Passing Providers Explicitly](#passing-providers-explicitly)_ -below. - -For backward compatibility with configurations targeting Terraform v0.10 and -earlier Terraform does not produce an error for a `provider` block in a shared -module if the `module` block only uses features available in Terraform v0.10, -but that is a legacy usage pattern that is no longer recommended. A legacy -module containing its own provider configurations is not compatible with the -`for_each`, `count`, and `depends_on` arguments that were introduced in -Terraform v0.13. For more information, see -[Legacy Shared Modules with Provider Configurations](#legacy-shared-modules-with-provider-configurations). - -Provider configurations are used for all operations on associated resources, -including destroying remote objects and refreshing state. Terraform retains, as -part of its state, a reference to the provider configuration that was most -recently used to apply changes to each resource. 
When a `resource` block is -removed from the configuration, this record in the state will be used to locate -the appropriate configuration because the resource's `provider` argument -(if any) will no longer be present in the configuration. - -As a consequence, you must ensure that all resources that belong to a -particular provider configuration are destroyed before you can remove that -provider configuration's block from your configuration. If Terraform finds -a resource instance tracked in the state whose provider configuration block is -no longer available then it will return an error during planning, prompting you -to reintroduce the provider configuration. - -### Provider Version Constraints in Modules - -Although provider _configurations_ are shared between modules, each module must -declare its own [provider requirements](provider-requirements.html), so that -Terraform can ensure that there is a single version of the provider that is -compatible with all modules in the configuration and to specify the -[source address](provider-requirements.html#source-addresses) that serves as -the global (module-agnostic) identifier for a provider. - -To declare that a module requires particular versions of a specific provider, -use a `required_providers` block inside a `terraform` block: - -```hcl -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 2.7.0" - } - } -} -``` - -A provider requirement says, for example, "This module requires version v2.7.0 -of the provider `hashicorp/aws` and will refer to it as `aws`." It doesn't, -however, specify any of the configuration settings that determine what remote -endpoints the provider will access, such as an AWS region; configuration -settings come from provider _configurations_, and a particular overall Terraform -configuration can potentially have -[several different configurations for the same provider](providers.html#alias-multiple-provider-instances). 
- -If you are writing a shared Terraform module, constrain only the minimum -required provider version using a `>=` constraint. This should specify the -minimum version containing the features your module relies on, and thus allow a -user of your module to potentially select a newer provider version if other -features are needed by other parts of their overall configuration. - -### Implicit Provider Inheritance - -For convenience in simple configurations, a child module automatically inherits -default (un-aliased) provider configurations from its parent. This means that -explicit `provider` blocks appear only in the root module, and downstream -modules can simply declare resources for that provider and have them -automatically associated with the root provider configurations. - -For example, the root module might contain only a `provider` block and a -`module` block to instantiate a child module: - -```hcl -provider "aws" { - region = "us-west-1" -} - -module "child" { - source = "./child" -} -``` - -The child module can then use any resource from this provider with no further -provider configuration required: - -```hcl -resource "aws_s3_bucket" "example" { - bucket = "provider-inherit-example" -} -``` - -We recommend using this approach when a single configuration for each provider -is sufficient for an entire configuration. - -~> **Note:** Only provider configurations are inherited by child modules, not provider source or version requirements. Each module must [declare its own provider requirements](provider-requirements.html). This is especially important for non-HashiCorp providers. - -In more complex situations there may be -[multiple provider configurations](/docs/configuration/providers.html#alias-multiple-provider-configurations), -or a child module may need to use different provider settings than -its parent. For such situations, you must pass providers explicitly. 
- -### Passing Providers Explicitly - -When child modules each need a different configuration of a particular -provider, or where the child module requires a different provider configuration -than its parent, you can use the `providers` argument within a `module` block -to explicitly define which provider configurations are available to the -child module. For example: - -```hcl -# The default "aws" configuration is used for AWS resources in the root -# module where no explicit provider instance is selected. -provider "aws" { - region = "us-west-1" -} - -# An alternate configuration is also defined for a different -# region, using the alias "usw2". -provider "aws" { - alias = "usw2" - region = "us-west-2" -} - -# An example child module is instantiated with the alternate configuration, -# so any AWS resources it defines will use the us-west-2 region. -module "example" { - source = "./example" - providers = { - aws = aws.usw2 - } -} -``` - -The `providers` argument within a `module` block is similar to -[the `provider` argument](resources.html#provider-selecting-a-non-default-provider-configuration) -within a resource, but is a map rather than a single string because a module may -contain resources from many different providers. - -The keys of the `providers` map are provider configuration names as expected by -the child module, and the values are the names of corresponding configurations -in the _current_ module. - -Once the `providers` argument is used in a `module` block, it overrides all of -the default inheritance behavior, so it is necessary to enumerate mappings -for _all_ of the required providers. This is to avoid confusion and surprises -that may result when mixing both implicit and explicit provider passing. - -Additional provider configurations (those with the `alias` argument set) are -_never_ inherited automatically by child modules, and so must always be passed -explicitly using the `providers` map. 
For example, a module -that configures connectivity between networks in two AWS regions is likely -to need both a source and a destination region. In that case, the root module -may look something like this: - -```hcl -provider "aws" { - alias = "usw1" - region = "us-west-1" -} - -provider "aws" { - alias = "usw2" - region = "us-west-2" -} - -module "tunnel" { - source = "./tunnel" - providers = { - aws.src = aws.usw1 - aws.dst = aws.usw2 - } -} -``` - -The subdirectory `./tunnel` must then contain _proxy configuration blocks_ like -the following, to declare that it requires its calling module to pass -configurations with these names in its `providers` argument: - -```hcl -provider "aws" { - alias = "src" -} - -provider "aws" { - alias = "dst" -} -``` - -Each resource should then have its own `provider` attribute set to either -`aws.src` or `aws.dst` to choose which of the two provider configurations to -use. - -### Proxy Configuration Blocks - -A proxy configuration block is one that contains only the `alias` argument. It -serves as a placeholder for provider configurations passed between modules, and -declares that a module expects to be explicitly passed an additional (aliased) -provider configuration. - --> **Note:** Although a completely empty proxy configuration block is also -valid, it is not necessary: proxy configuration blocks are needed only to -establish which _aliased_ provider configurations a child module expects. -Don't use a proxy configuration block if a module only needs a single default -provider configuration, and don't use proxy configuration blocks only to imply -[provider requirements](./provider-requirements.html). 
- -## Legacy Shared Modules with Provider Configurations - -In Terraform v0.10 and earlier there was no explicit way to use different -configurations of a provider in different modules in the same configuration, -and so module authors commonly worked around this by writing `provider` blocks -directly inside their modules, making the module have its own separate -provider configurations separate from those declared in the root module. - -However, that pattern had a significant drawback: because a provider -configuration is required to destroy the remote object associated with a -resource instance as well as to create or update it, a provider configuration -must always stay present in the overall Terraform configuration for longer -than all of the resources it manages. If a particular module includes -both resources and the provider configurations for those resources then -removing the module from its caller would violate that constraint: both the -resources and their associated providers would, in effect, be removed -simultaneously. - -Terraform v0.11 introduced the mechanisms described in earlier sections to -allow passing provider configurations between modules in a structured way, and -thus we explicitly recommended against writing a child module with its own -provider configuration blocks. However, that legacy pattern continued to work -for compatibility purposes -- though with the same drawback -- until Terraform -v0.13. - -Terraform v0.13 introduced the possibility for a module itself to use the -`for_each`, `count`, and `depends_on` arguments, but the implementation of -those unfortunately conflicted with the support for the legacy pattern. - -To retain the backward compatibility as much as possible, Terraform v0.13 -continues to support the legacy pattern for module blocks that do not use these -new features, but a module with its own provider configurations is not -compatible with `for_each`, `count`, or `depends_on`. 
Terraform will produce an -error if you attempt to combine these features. For example: - -``` -Error: Module does not support count - - on main.tf line 15, in module "child": - 15: count = 2 - -Module "child" cannot be used with count because it contains a nested provider -configuration for "aws", at child/main.tf:2,10-15. - -This module can be made compatible with count by changing it to receive all of -its provider configurations from the calling module, by using the "providers" -argument in the calling module block. -``` - -To make a module compatible with the new features, you must either remove all -of the `provider` blocks from its definition or, if you need multiple -configurations for the same provider, replace them with -_proxy configuration blocks_ as described in -[Passing Providers Explicitly](#passing-providers-explicitly). - -If the new version of the module uses proxy configuration blocks, or if the -calling module needs the child module to use different provider configurations -than its own default provider configurations, the calling module must then -include an explicit `providers` argument to describe which provider -configurations the child module will use: - -```hcl -provider "aws" { - region = "us-west-1" -} - -provider "aws" { - region = "us-east-1" - alias = "east" -} - -module "child" { - count = 2 - providers = { - # By default, the child module would use the - # default (unaliased) AWS provider configuration - # using us-west-1, but this will override it - # to use the additional "east" configuration - # for its resources instead. - aws = aws.east - } -} -``` - -Since the association between resources and provider configurations is -static, module calls using `for_each` or `count` cannot pass different -provider configurations to different instances. 
If you need different -instances of your module to use different provider configurations then you -must use a separate `module` block for each distinct set of provider -configurations: - -```hcl -provider "aws" { - alias = "usw1" - region = "us-west-1" -} - -provider "aws" { - alias = "usw2" - region = "us-west-2" -} - -provider "google" { - alias = "usw1" - credentials = "${file("account.json")}" - project = "my-project-id" - region = "us-west1" - zone = "us-west1-a" -} - -provider "google" { - alias = "usw2" - credentials = "${file("account.json")}" - project = "my-project-id" - region = "us-west2" - zone = "us-west2-a" -} - -module "bucket_w1" { - source = "./publish_bucket" - providers = { - aws.src = aws.usw1 - google.src = google.usw2 - } -} - -module "bucket_w2" { - source = "./publish_bucket" - providers = { - aws.src = aws.usw2 - google.src = google.usw2 - } -} -``` - -## Tainting resources within a module - -The [taint command](/docs/commands/taint.html) can be used to _taint_ specific -resources within a module: - -```shell -$ terraform taint module.salt_master.aws_instance.salt_master -``` - -It is not possible to taint an entire module. Instead, each resource within -the module must be tainted separately. +
diff --git a/website/docs/configuration/resources.html.md b/website/docs/configuration/resources.html.md index 42f71fcf0..87ae1bf5f 100644 --- a/website/docs/configuration/resources.html.md +++ b/website/docs/configuration/resources.html.md @@ -1,760 +1,135 @@ --- -layout: "docs" -page_title: "Resources - Configuration Language" -sidebar_current: "docs-config-resources" -description: |- - Resources are the most important element in a Terraform configuration. - Each resource corresponds to an infrastructure object, such as a virtual - network or compute instance. +layout: "language" +page_title: "Resources Landing Page - Configuration Language" --- -# Resources +# Resources Landing Page --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Resources](../configuration-0-11/resources.html). +To improve navigation, we've split the old Resources page into several smaller +pages. -> **Hands-on:** Try the [Terraform: Get Started](https://learn.hashicorp.com/collections/terraform/aws-get-started?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn. + + + + + + + + -_Resources_ are the most important element in the Terraform language. -Each resource block describes one or more infrastructure objects, such -as virtual networks, compute instances, or higher-level components such -as DNS records. +## Syntax and Elements of Resource Blocks -## Resource Syntax +This information has moved to +[Resource Blocks](/docs/language/resources/syntax.html). -Resource declarations can include a number of advanced features, but only -a small subset are required for initial use. More advanced syntax features, -such as single resource declarations that produce multiple similar remote -objects, are described later in this page. +
-```hcl -resource "aws_instance" "web" { - ami = "ami-a1b2c3d4" - instance_type = "t2.micro" -} -``` -A `resource` block declares a resource of a given type ("aws_instance") -with a given local name ("web"). The name is used to refer to this resource -from elsewhere in the same Terraform module, but has no significance outside -that module's scope. - -The resource type and name together serve as an identifier for a given -resource and so must be unique within a module. -Within the block body (between `{` and `}`) are the configuration arguments -for the resource itself. Most arguments in this section depend on the -resource type, and indeed in this example both `ami` and `instance_type` are -arguments defined specifically for [the `aws_instance` resource type](/docs/providers/aws/r/instance.html). - --> **Note:** Resource names must start with a letter or underscore, and may -contain only letters, digits, underscores, and dashes. + + + + -## Resource Types - -Each resource is associated with a single _resource type_, which determines -the kind of infrastructure object it manages and what arguments and other -attributes the resource supports. - -### Providers +## Details of Resource Behavior -Each resource type is implemented by a [provider](./provider-requirements.html), -which is a plugin for Terraform that offers a collection of resource types. A -provider usually provides resources to manage a single cloud or on-premises -infrastructure platform. Providers are distributed separately from Terraform -itself, but Terraform can automatically install most providers when initializing -a working directory. +This information has moved to +[Resource Behavior](/docs/language/resources/behavior.html). -In order to manage resources, a Terraform module must specify which providers it -requires. Additionally, most providers need some configuration in order to -access their remote APIs, and the root module must provide that configuration. +
-For more information, see: -- [Provider Requirements](./provider-requirements.html), for declaring which - providers a module uses. -- [Provider Configuration](./providers.html), for configuring provider settings. -Terraform usually automatically determines which provider to use based on a -resource type's name. (By convention, resource type names start with their -provider's preferred local name.) When using multiple configurations of a -provider (or non-preferred local provider names), you must use the `provider` -meta-argument to manually choose an alternate provider configuration. See -[the section on `provider` below][inpage-provider] for more details. +## Resource Meta-Arguments -### Resource Arguments - -Most of the arguments within the body of a `resource` block are specific to the -selected resource type. The resource type's documentation lists which arguments -are available and how their values should be formatted. - -The values for resource arguments can make full use of -[expressions](./expressions.html) and other dynamic Terraform -language features. - -There are also some _meta-arguments_ that are defined by Terraform itself -and apply across all resource types. (See [Meta-Arguments](#meta-arguments) below.) - -### Documentation for Resource Types +Each resource meta-argument has moved to its own page: -Every Terraform provider has its own documentation, describing its resource -types and their arguments. - -Most publicly available providers are distributed on the -[Terraform Registry](https://registry.terraform.io/browse/providers), which also -hosts their documentation. When viewing a provider's page on the Terraform -Registry, you can click the "Documentation" link in the header to browse its -documentation. Provider documentation on the registry is versioned, and you can -use the dropdown version menu in the header to switch which version's -documentation you are viewing. 
- -To browse the publicly available providers and their documentation, see -[the providers section of the Terraform Registry](https://registry.terraform.io/browse/providers). +- [`depends_on`](/docs/language/meta-arguments/depends_on.html) +- [`count`](/docs/language/meta-arguments/count.html) +- [`for_each`](/docs/language/meta-arguments/for_each.html) +- [`provider`](/docs/language/meta-arguments/resource-provider.html) +- [`lifecycle`](/docs/language/meta-arguments/lifecycle.html) +- [Provisioners](/docs/language/resources/provisioners/index.html) --> **Note:** Provider documentation used to be hosted directly on terraform.io, -as part of Terraform's core documentation. Although some provider documentation -might still be hosted here, the Terraform Registry is now the main home for all -public provider docs. (The exception is the built-in -[`terraform` provider](/docs/providers/terraform/index.html) for reading state -data, since it is not available on the Terraform Registry.) +
-## Resource Behavior -A `resource` block declares that you want a particular infrastructure object -to exist with the given settings. If you are writing a new configuration for -the first time, the resources it defines will exist _only_ in the configuration, -and will not yet represent real infrastructure objects in the target platform. -_Applying_ a Terraform configuration is the process of creating, updating, -and destroying real infrastructure objects in order to make their settings -match the configuration. - -When Terraform creates a new infrastructure object represented by a `resource` -block, the identifier for that real object is saved in Terraform's -[state](/docs/state/index.html), allowing it to be updated and destroyed -in response to future changes. For resource blocks that already have an -associated infrastructure object in the state, Terraform compares the -actual configuration of the object with the arguments given in the -configuration and, if necessary, updates the object to match the configuration. + -This general behavior applies for all resources, regardless of type. The -details of what it means to create, update, or destroy a resource are different -for each resource type, but this standard set of verbs is common across them -all. - -The meta-arguments within `resource` blocks, documented in the -sections below, allow some details of this standard resource behavior to be -customized on a per-resource basis. - -### Accessing Resource Attributes - -[Expressions](./expressions.html) within a Terraform module can access -information about resources in the same module, and you can use that information -to help configure other resources. Use the `..` -syntax to reference a resource attribute in an expression. 
- -In addition to arguments specified in the configuration, resources often provide -read-only attributes with information obtained from the remote API; this often -includes things that can't be known until the resource is created, like the -resource's unique random ID. - -Many providers also include [data sources](./data-sources.html), which are a -special type of resource used only for looking up information. - -For a list of the attributes a resource or data source type provides, consult -its documentation; these are generally included in a second list below its list -of configurable arguments. - -For more information about referencing resource attributes in expressions, see -[Expressions: References to Resource Attributes](./expressions.html#references-to-resource-attributes). - -### Resource Dependencies - -Most resources in a configuration don't have any particular relationship, and -Terraform can make changes to several unrelated resources in parallel. - -However, some resources must be processed after other specific resources; -sometimes this is because of how the resource works, and sometimes the -resource's configuration just requires information generated by another -resource. - -Most resource dependencies are handled automatically. Terraform analyses any -[expressions](./expressions.html) within a `resource` block to find references -to other objects, and treats those references as implicit ordering requirements -when creating, updating, or destroying resources. Since most resources with -behavioral dependencies on other resources also refer to those resources' data, -it's usually not necessary to manually specify dependencies between resources. - -However, some dependencies cannot be recognized implicitly in configuration. For -example, if Terraform must manage access control policies _and_ take actions -that require those policies to be present, there is a hidden dependency between -the access policy and a resource whose creation depends on it. 
In these rare -cases, [the `depends_on` meta-argument][inpage-depend] can explicitly specify a -dependency. - -## Meta-Arguments - -Terraform CLI defines the following meta-arguments, which can be used with -any resource type to change the behavior of resources: - -- [`depends_on`, for specifying hidden dependencies][inpage-depend] -- [`count`, for creating multiple resource instances according to a count][inpage-count] -- [`for_each`, to create multiple instances according to a map, or set of strings][inpage-for_each] -- [`provider`, for selecting a non-default provider configuration][inpage-provider] -- [`lifecycle`, for lifecycle customizations][inpage-lifecycle] -- [`provisioner` and `connection`, for taking extra actions after resource creation][inpage-provisioner] - -These arguments often have additional restrictions on what language features can -be used with them, which are described in each - -### `depends_on`: Explicit Resource Dependencies - -[inpage-depend]: #depends_on-explicit-resource-dependencies - -Use the `depends_on` meta-argument to handle hidden resource dependencies that -Terraform can't automatically infer. - -Explicitly specifying a dependency is only necessary when a resource relies on -some other resource's behavior but _doesn't_ access any of that resource's data -in its arguments. - -This argument is available in all `resource` blocks, regardless of resource -type. For example: - -```hcl -resource "aws_iam_role" "example" { - name = "example" - - # assume_role_policy is omitted for brevity in this example. See the - # documentation for aws_iam_role for a complete example. - assume_role_policy = "..." -} - -resource "aws_iam_instance_profile" "example" { - # Because this expression refers to the role, Terraform can infer - # automatically that the role must be created first. 
- role = aws_iam_role.example.name -} - -resource "aws_iam_role_policy" "example" { - name = "example" - role = aws_iam_role.example.name - policy = jsonencode({ - "Statement" = [{ - # This policy allows software running on the EC2 instance to - # access the S3 API. - "Action" = "s3:*", - "Effect" = "Allow", - }], - }) -} - -resource "aws_instance" "example" { - ami = "ami-a1b2c3d4" - instance_type = "t2.micro" +### `depends_on` - # Terraform can infer from this that the instance profile must - # be created before the EC2 instance. - iam_instance_profile = aws_iam_instance_profile.example - - # However, if software running in this EC2 instance needs access - # to the S3 API in order to boot properly, there is also a "hidden" - # dependency on the aws_iam_role_policy that Terraform cannot - # automatically infer, so it must be declared explicitly: - depends_on = [ - aws_iam_role_policy.example, - ] -} -``` - -The `depends_on` meta-argument, if present, must be a list of references -to other resources in the same module. Arbitrary expressions are not allowed -in the `depends_on` argument value, because its value must be known before -Terraform knows resource relationships and thus before it can safely -evaluate expressions. - -The `depends_on` argument should be used only as a last resort. When using it, -always include a comment explaining why it is being used, to help future -maintainers understand the purpose of the additional dependency. - -### `count`: Multiple Resource Instances By Count - -[inpage-count]: #count-multiple-resource-instances-by-count - --> **Note:** A given resource block cannot use both `count` and `for_each`. - -By default, a `resource` block configures one real infrastructure object. -However, sometimes you want to manage several similar objects, such as a fixed -pool of compute instances. Terraform has two ways to do this: -`count` and [`for_each`][inpage-for_each]. 
+This information has moved to +[`depends_on`](/docs/language/meta-arguments/depends_on.html). -> **Hands-on:** Try the [Manage Similar Resources With Count](https://learn.hashicorp.com/tutorials/terraform/count?in=terraform/0-13&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. +
-The `count` meta-argument accepts a whole number, and creates that many -instances of the resource. Each instance has a distinct infrastructure object -associated with it (as described above in -[Resource Behavior](#resource-behavior)), and each is separately created, -updated, or destroyed when the configuration is applied. -```hcl -resource "aws_instance" "server" { - count = 4 # create four similar EC2 instances - ami = "ami-a1b2c3d4" - instance_type = "t2.micro" + + + + + + + - tags = { - Name = "Server ${count.index}" - } -} -``` +### `count` -#### The `count` Object - -In resource blocks where `count` is set, an additional `count` object is -available in expressions, so you can modify the configuration of each instance. -This object has one attribute: - -- `count.index` — The distinct index number (starting with `0`) corresponding - to this instance. - -#### Referring to Instances - -When `count` is set, Terraform distinguishes between the resource block itself -and the multiple _resource instances_ associated with it. Instances are -identified by an index number, starting with `0`. - -- `.` (for example, `aws_instance.server`) refers to the resource block. -- `.[]` (for example, `aws_instance.server[0]`, - `aws_instance.server[1]`, etc.) refers to individual instances. - -This is different from resources without `count` or `for_each`, which can be -referenced without an index or key. - --> **Note:** Within nested `provisioner` or `connection` blocks, the special -`self` object refers to the current _resource instance,_ not the resource block -as a whole. - -#### Using Expressions in `count` - -The `count` meta-argument accepts numeric [expressions](./expressions.html). -However, unlike most resource arguments, the `count` value must be known -_before_ Terraform performs any remote resource actions. 
This means `count` -can't refer to any resource attributes that aren't known until after a -configuration is applied (such as a unique ID generated by the remote API when -an object is created). - -#### When to Use `for_each` Instead of `count` - -If your resource instances are almost identical, `count` is appropriate. If some -of their arguments need distinct values that can't be directly derived from an -integer, it's safer to use `for_each`. - -Before `for_each` was available, it was common to derive `count` from the -length of a list and use `count.index` to look up the original list value: - -```hcl -variable "subnet_ids" { - type = list(string) -} - -resource "aws_instance" "server" { - # Create one instance for each subnet - count = length(var.subnet_ids) - - ami = "ami-a1b2c3d4" - instance_type = "t2.micro" - subnet_id = var.subnet_ids[count.index] +This information has moved to +[`count`](/docs/language/meta-arguments/count.html). - tags = { - Name = "Server ${count.index}" - } -} -``` +
-This was fragile, because the resource instances were still identified by their -_index_ instead of the string values in the list. If an element was removed from -the middle of the list, every instance _after_ that element would see its -`subnet_id` value change, resulting in more remote object changes than intended. -Using `for_each` gives the same flexibility without the extra churn. -### `for_each`: Multiple Resource Instances Defined By a Map, or Set of Strings -[inpage-for_each]: #for_each-multiple-resource-instances-defined-by-a-map-or-set-of-strings - --> **Version note:** `for_each` was added in Terraform 0.12.6. - --> **Note:** A given resource block cannot use both `count` and `for_each`. - -By default, a `resource` block configures one real infrastructure object. -However, sometimes you want to manage several similar objects, such as a fixed -pool of compute instances. Terraform has two ways to do this: -[`count`][inpage-count] and `for_each`. - -> **Hands-on:** Try the [Manage Similar Resources With For Each](https://learn.hashicorp.com/tutorials/terraform/for-each?in=terraform/0-13&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. - -The `for_each` meta-argument accepts a map or a set of strings, and creates an -instance for each item in that map or set. Each instance has a distinct -infrastructure object associated with it (as described above in -[Resource Behavior](#resource-behavior)), and each is separately created, -updated, or destroyed when the configuration is applied. - --> **Note:** The keys of the map (or all the values in the case of a set of strings) must -be _known values_, or you will get an error message that `for_each` has dependencies -that cannot be determined before apply, and a `-target` may be needed. 
`for_each` keys -cannot be the result (or rely on the result of) of impure functions, including `uuid`, `bcrypt`, -or `timestamp`, as their evaluation is deferred resource during evaluation. - -Map: - -```hcl -resource "azurerm_resource_group" "rg" { - for_each = { - a_group = "eastus" - another_group = "westus2" - } - name = each.key - location = each.value -} -``` -Set of strings: - -```hcl -resource "aws_iam_user" "the-accounts" { - for_each = toset( ["Todd", "James", "Alice", "Dottie"] ) - name = each.key -} -``` -#### The `each` Object - -In resource blocks where `for_each` is set, an additional `each` object is -available in expressions, so you can modify the configuration of each instance. -This object has two attributes: - -- `each.key` — The map key (or set member) corresponding to this instance. -- `each.value` — The map value corresponding to this instance. (If a set was - provided, this is the same as `each.key`.) - -#### Using Expressions in `for_each` - -The `for_each` meta-argument accepts map or set [expressions](./expressions.html). -However, unlike most resource arguments, the `for_each` value must be known -_before_ Terraform performs any remote resource actions. This means `for_each` -can't refer to any resource attributes that aren't known until after a -configuration is applied (such as a unique ID generated by the remote API when -an object is created). - -The `for_each` value must be a map or set with one element per desired -resource instance. If you need to declare resource instances based on a nested -data structure or combinations of elements from multiple data structures you -can use Terraform expressions and functions to derive a suitable value. -For example: - -* Transform a multi-level nested structure into a flat list by - [using nested `for` expressions with the `flatten` function](./functions/flatten.html#flattening-nested-structures-for-for_each). 
-* Produce an exhaustive list of combinations of elements from two or more - collections by - [using the `setproduct` function inside a `for` expression](./functions/setproduct.html#finding-combinations-for-for_each). - -#### Referring to Instances - -When `for_each` is set, Terraform distinguishes between the resource block itself -and the multiple _resource instances_ associated with it. Instances are -identified by a map key (or set member) from the value provided to `for_each`. - -- `.` (for example, `azurerm_resource_group.rg`) refers to the resource block. -- `.[]` (for example, `azurerm_resource_group.rg["a_group"]`, - `azurerm_resource_group.rg["another_group"]`, etc.) refers to individual instances. - -This is different from resources without `count` or `for_each`, which can be -referenced without an index or key. - --> **Note:** Within nested `provisioner` or `connection` blocks, the special -`self` object refers to the current _resource instance,_ not the resource block -as a whole. - -#### Using Sets - -The Terraform language doesn't have a literal syntax for -[set values](./types.html#collection-types), but you can use the `toset` -function to explicitly convert a list of strings to a set: - -```hcl -locals { - subnet_ids = toset([ - "subnet-abcdef", - "subnet-012345", - ]) -} - -resource "aws_instance" "server" { - for_each = local.subnet_ids - - ami = "ami-a1b2c3d4" - instance_type = "t2.micro" - subnet_id = each.key # note: each.key and each.value are the same for a set - - tags = { - Name = "Server ${each.key}" - } -} -``` - -Conversion from list to set discards the ordering of the items in the list and -removes any duplicate elements. `toset(["b", "a", "b"])` will produce a set -containing only `"a"` and `"b"` in no particular order; the second `"b"` is -discarded. 
- -If you are writing a module with an [input variable](./variables.html) that -will be used as a set of strings for `for_each`, you can set its type to -`set(string)` to avoid the need for an explicit type conversion: - -``` -variable "subnet_ids" { - type = set(string) -} - -resource "aws_instance" "server" { - for_each = var.subnet_ids - - # (and the other arguments as above) -} -``` - -### `provider`: Selecting a Non-default Provider Configuration - -[inpage-provider]: #provider-selecting-a-non-default-provider-configuration - -The `provider` meta-argument specifies which provider configuration to use, -overriding Terraform's default behavior of selecting one based on the resource -type name. Its value should be an unquoted `.` reference. - -As described in [Provider Configuration](./providers.html), you can optionally -create multiple configurations for a single provider (usually to manage -resources in different regions of multi-region services). Each provider can have -one default configuration, and any number of alternate configurations that -include an extra name segment (or "alias"). - -By default, Terraform interprets the initial word in the resource type name -(separated by underscores) as the local name of a provider, and uses that -provider's default configuration. For example, the resource type -`google_compute_instance` is associated automatically with the default -configuration for the provider named `google`. - -By using the `provider` meta-argument, you can select an alternate provider -configuration for a resource: - -```hcl -# default configuration -provider "google" { - region = "us-central1" -} - -# alternate configuration, whose alias is "europe" -provider "google" { - alias = "europe" - region = "europe-west1" -} - -resource "google_compute_instance" "example" { - # This "provider" meta-argument selects the google provider - # configuration whose alias is "europe", rather than the - # default configuration. 
- provider = google.europe - - # ... -} -``` - -A resource always has an implicit dependency on its associated provider, to -ensure that the provider is fully configured before any resource actions -are taken. - -The `provider` meta-argument expects -[a `.` reference](./providers.html#referring-to-alternate-providers), -which does not need to be quoted. Arbitrary expressions are not permitted for -`provider` because it must be resolved while Terraform is constructing the -dependency graph, before it is safe to evaluate expressions. - -### `lifecycle`: Lifecycle Customizations - -[inpage-lifecycle]: #lifecycle-lifecycle-customizations - -The general lifecycle for resources is described above in the -[Resource Behavior](#resource-behavior) section. Some details of that behavior -can be customized using the special nested `lifecycle` block within a resource -block body: - -``` -resource "azurerm_resource_group" "example" { - # ... - - lifecycle { - create_before_destroy = true - } -} -``` - -The `lifecycle` block and its contents are meta-arguments, available -for all `resource` blocks regardless of type. The following lifecycle -meta-arguments are supported: - -* `create_before_destroy` (bool) - By default, when Terraform must make a - change to a resource argument that cannot be updated in-place due to - remote API limitations, Terraform will instead destroy the existing object - and then create a new replacement object with the new configured arguments. - - The `create_before_destroy` meta-argument changes this behavior so that - the new replacement object is created _first,_ and then the prior object - is destroyed only once the replacement is created. - - This is an opt-in behavior because many remote object types have unique - name requirements or other constraints that must be accommodated for - both a new and an old object to exist concurrently. 
Some resource types - offer special options to append a random suffix onto each object name to - avoid collisions, for example. Terraform CLI cannot automatically activate - such features, so you must understand the constraints for each resource - type before using `create_before_destroy` with it. - -* `prevent_destroy` (bool) - This meta-argument, when set to `true`, will - cause Terraform to reject with an error any plan that would destroy the - infrastructure object associated with the resource, as long as the argument - remains present in the configuration. - - This can be used as a measure of safety against the accidental replacement - of objects that may be costly to reproduce, such as database instances. - However, it will make certain configuration changes impossible to apply, - and will prevent the use of the `terraform destroy` command once such - objects are created, and so this option should be used sparingly. - - Since this argument must be present in configuration for the protection to - apply, note that this setting does not prevent the remote object from - being destroyed if the `resource` block were removed from configuration - entirely: in that case, the `prevent_destroy` setting is removed along - with it, and so Terraform will allow the destroy operation to succeed. - -* `ignore_changes` (list of attribute names) - By default, Terraform detects - any difference in the current settings of a real infrastructure object - and plans to update the remote object to match configuration. - - The `ignore_changes` feature is intended to be used when a resource is - created with references to data that may change in the future, but should - not effect said resource after its creation. In some rare cases, settings - of a remote object are modified by processes outside of Terraform, which - Terraform would then attempt to "fix" on the next run. 
In order to make - Terraform share management responsibilities of a single object with a - separate process, the `ignore_changes` meta-argument specifies resource - attributes that Terraform should ignore when planning updates to the - associated remote object. - - The arguments corresponding to the given attribute names are considered - when planning a _create_ operation, but are ignored when planning an - _update_. The arguments are the relative address of the attributes in the - resource. Map and list elements can be referenced using index notation, - like `tags["Name"]` and `list[0]` respectively. - - - ```hcl - resource "aws_instance" "example" { - # ... - - lifecycle { - ignore_changes = [ - # Ignore changes to tags, e.g. because a management agent - # updates these based on some ruleset managed elsewhere. - tags, - ] - } - } - ``` - - Instead of a list, the special keyword `all` may be used to instruct - Terraform to ignore _all_ attributes, which means that Terraform can - create and destroy the remote object but will never propose updates to it. - - Only attributes defined by the resource type can be ignored. - `ignore_changes` cannot be applied to itself or to any other meta-arguments. - -The `lifecycle` settings all effect how Terraform constructs and traverses -the dependency graph. As a result, only literal values can be used because -the processing happens too early for arbitrary expression evaluation. - -### `provisioner` and `connection`: Resource Provisioners - -[inpage-provisioner]: #provisioner-and-connection-resource-provisioners - -> **Hands-on:** To learn about more declarative ways to handle provisioning actions, try the [Provision Infrastructure Deployed with Terraform](https://learn.hashicorp.com/collections/terraform/provision?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn. 
- -Some infrastructure objects require some special actions to be taken after they -are created before they can become fully functional. For example, compute -instances may require configuration to be uploaded or a configuration management -program to be run before they can begin their intended operation. - -Create-time actions like these can be described using _resource provisioners_. -A provisioner is another type of plugin supported by Terraform, and each -provisioner takes a different kind of action in the context of a resource -being created. - -Provisioning steps should be used sparingly, since they represent -non-declarative actions taken during the creation of a resource and so -Terraform is not able to model changes to them as it can for the declarative -portions of the Terraform language. - -Provisioners can also be defined to run when a resource is _destroyed_, with -certain limitations. - -The `provisioner` and `connection` block types within `resource` blocks are -meta-arguments available across all resource types. Provisioners and their -usage are described in more detail in -[the Provisioners section](/docs/provisioners/index.html). - -## Local-only Resources - -While most resource types correspond to an infrastructure object type that -is managed via a remote network API, there are certain specialized resource -types that operate only within Terraform itself, calculating some results and -saving those results in the state for future use. - -For example, local-only resource types exist for -[generating private keys](/docs/providers/tls/r/private_key.html), -[issuing self-signed TLS certificates](/docs/providers/tls/r/self_signed_cert.html), -and even [generating random ids](/docs/providers/random/r/id.html). -While these resource types often have a more marginal purpose than those -managing "real" infrastructure objects, they can be useful as glue to help -connect together other resources. 
- -The behavior of local-only resources is the same as all other resources, but -their result data exists only within the Terraform state. "Destroying" such -a resource means only to remove it from the state, discarding its data. - -## Operation Timeouts - -Some resource types provide a special `timeouts` nested block argument that -allows you to customize how long certain operations are allowed to take -before being considered to have failed. -For example, [`aws_db_instance`](/docs/providers/aws/r/db_instance.html) -allows configurable timeouts for `create`, `update` and `delete` operations. - -Timeouts are handled entirely by the resource type implementation in the -provider, but resource types offering these features follow the convention -of defining a child block called `timeouts` that has a nested argument -named after each operation that has a configurable timeout value. -Each of these arguments takes a string representation of a duration, such -as `"60m"` for 60 minutes, `"10s"` for ten seconds, or `"2h"` for two hours. - -```hcl -resource "aws_db_instance" "example" { - # ... - - timeouts { - create = "60m" - delete = "2h" - } -} -``` - -The set of configurable operations is chosen by each resource type. Most -resource types do not support the `timeouts` block at all. Consult the -documentation for each resource type to see which operations it offers -for configuration, if any. + + + + + + + + +### `for_each` + +This information has moved to +[`for_each`](/docs/language/meta-arguments/for_each.html). + +
+ + + + + +### `provider` + +This information has moved to +[`provider`](/docs/language/meta-arguments/resource-provider.html). + +
+ + + + + + + + +### `lifecycle` + +This information has moved to +[`lifecycle`](/docs/language/meta-arguments/lifecycle.html). + +
+ + + + + +### Provisioners + +This information has moved to +[Provisioners](/docs/language/resources/provisioners/index.html). + +
diff --git a/website/docs/internals/credentials-helpers.html.md b/website/docs/internals/credentials-helpers.html.md index c7572484e..8e8b71aa0 100644 --- a/website/docs/internals/credentials-helpers.html.md +++ b/website/docs/internals/credentials-helpers.html.md @@ -12,7 +12,7 @@ For Terraform-specific features that interact with remote network services, such as [module registries](/docs/registry/) and [remote operations](/docs/cloud/run/cli.html), Terraform by default looks for API credentials to use in these calls in -[the CLI configuration](/docs/commands/cli-config.html). +[the CLI configuration](/docs/cli/config/config-file.html). Credentials helpers offer an alternative approach that allows you to customize how Terraform obtains credentials using an external program, which can then @@ -20,7 +20,7 @@ directly access an existing secrets management system in your organization. This page is about how to write and install a credentials helper. To learn how to configure a credentials helper that was already installed, see -[the CLI config Credentials Helpers section](/docs/commands/cli-config.html#credentials-helpers). +[the CLI config Credentials Helpers section](/docs/cli/config/config-file.html#credentials-helpers). ## How Terraform finds Credentials Helpers @@ -57,7 +57,7 @@ The current set of verbs are: To represent credentials, the credentials helper protocol uses a JSON object whose contents correspond with the contents of -[`credentials` blocks in the CLI configuration](/docs/commands/cli-config.html#credentials). +[`credentials` blocks in the CLI configuration](/docs/cli/config/config-file.html#credentials). 
To represent an API token, the object contains a property called "token" whose value is the token string: diff --git a/website/docs/internals/graph.html.md b/website/docs/internals/graph.html.md index e2de628f5..56ec892fa 100644 --- a/website/docs/internals/graph.html.md +++ b/website/docs/internals/graph.html.md @@ -108,8 +108,8 @@ The amount of parallelism is limited using a semaphore to prevent too many concurrent operations from overwhelming the resources of the machine running Terraform. By default, up to 10 nodes in the graph will be processed concurrently. This number can be set using the `-parallelism` flag on the -[plan](/docs/commands/plan.html), [apply](/docs/commands/apply.html), and -[destroy](/docs/commands/destroy.html) commands. +[plan](/docs/cli/commands/plan.html), [apply](/docs/cli/commands/apply.html), and +[destroy](/docs/cli/commands/destroy.html) commands. Setting `-parallelism` is considered an advanced operation and should not be necessary for normal usage of Terraform. It may be helpful in certain special diff --git a/website/docs/internals/json-format.html.md b/website/docs/internals/json-format.html.md index 4c345220e..88d3e0e75 100644 --- a/website/docs/internals/json-format.html.md +++ b/website/docs/internals/json-format.html.md @@ -14,7 +14,7 @@ When Terraform plans to make changes, it prints a human-readable summary to the Since the format of plan files isn't suited for use with external tools (and likely never will be), Terraform can output a machine-readable JSON representation of a plan file's changes. It can also convert state files to the same format, to simplify data loading and provide better long-term compatibility. -Use `terraform show -json ` to generate a JSON representation of a plan or state file. See [the `terraform show` documentation](/docs/commands/show.html) for more details. +Use `terraform show -json ` to generate a JSON representation of a plan or state file. 
See [the `terraform show` documentation](/docs/cli/commands/show.html) for more details. -> **Note:** The output includes a `format_version` key, which currently has major version zero to indicate that the format is experimental and subject to change. A future version will assign a non-zero major version and make stronger promises about compatibility. We do not anticipate any significant breaking changes to the format before its first major version, however. @@ -56,7 +56,7 @@ The extra wrapping object here will allow for any extension we may need to add i A plan consists of a prior state, the configuration that is being applied to that state, and the set of changes Terraform plans to make to achieve that. -For ease of consumption by callers, the plan representation includes a partial representation of the values in the final state (using a [value representation](#value-representation)), allowing callers to easily analyze the planned outcome using similar code as for analyzing the prior state. +For ease of consumption by callers, the plan representation includes a partial representation of the values in the final state (using a [value representation](#values-representation)), allowing callers to easily analyze the planned outcome using similar code as for analyzing the prior state. ```javascript { @@ -244,7 +244,7 @@ The following example illustrates the structure of a ``: } ``` -The translation of attribute and output values is the same intuitive mapping from HCL types to JSON types used by Terraform's [`jsonencode`](/docs/configuration/functions/jsonencode.html) function. This mapping does lose some information: lists, sets, and tuples all lower to JSON arrays while maps and objects both lower to JSON objects. Unknown values and null values are both treated as absent or null. 
+The translation of attribute and output values is the same intuitive mapping from HCL types to JSON types used by Terraform's [`jsonencode`](/docs/language/functions/jsonencode.html) function. This mapping does lose some information: lists, sets, and tuples all lower to JSON arrays while maps and objects both lower to JSON objects. Unknown values and null values are both treated as absent or null. Only the "current" object for each resource instance is described. "Deposed" objects are not reflected in this structure at all; in plan representations, you can refer to the change representations for further details. diff --git a/website/docs/internals/login-protocol.html.markdown b/website/docs/internals/login-protocol.html.markdown index 560a471be..59e3a39a3 100644 --- a/website/docs/internals/login-protocol.html.markdown +++ b/website/docs/internals/login-protocol.html.markdown @@ -9,7 +9,7 @@ description: |- # Server-side Login Protocol ~> **Note:** You don't need to read these docs to _use_ -[`terraform login`](/docs/commands/login.html). The information below is for +[`terraform login`](/docs/cli/commands/login.html). The information below is for anyone intending to implement the server side of `terraform login` in order to offer Terraform-native services in a third-party system. diff --git a/website/docs/internals/module-registry-protocol.html.md b/website/docs/internals/module-registry-protocol.html.md index 0369f1127..4da947214 100644 --- a/website/docs/internals/module-registry-protocol.html.md +++ b/website/docs/internals/module-registry-protocol.html.md @@ -10,7 +10,7 @@ description: |- # Module Registry Protocol --> Third-party provider registries are supported only in Terraform CLI 0.11 and later. Prior versions do not support this protocol. +-> Third-party module registries are supported only in Terraform CLI 0.11 and later. Prior versions do not support this protocol. 
The module registry protocol is what Terraform CLI uses to discover metadata about modules available for installation and to locate the distribution @@ -34,23 +34,25 @@ Terraform CLI itself does not use them. Each Terraform module has an associated address. A module address has the syntax `hostname/namespace/name/system`, where: -* `hostname` is the hostname of the provider registry that serves this module. +* `hostname` is the hostname of the module registry that serves this module. * `namespace` is the name of a namespace, unique on a particular hostname, that can contain one or more modules that are somehow related. On the public Terraform Registry the "namespace" represents the organization that is packaging and distributing the module. * `name` is the module name, which generally names the abstraction that the module is intending to create. -* `system` is the name of a system that the module is primarily written to - target. For multi-cloud abstractions, there can be multiple modules with - addresses that differ only in "system" to reflect system-specific +* `system` is the name of a remote system that the module is primarily written + to target. For multi-cloud abstractions, there can be multiple modules with + addresses that differ only in "system" to reflect provider-specific implementations of the abstraction, like `registry.terraform.io/hashicorp/consul/aws` vs. `registry.terraform.io/hashicorp/consul/azurerm`. The system name commonly - matches the type portion of the address of an official provider, but that - is not required. + matches the type portion of the address of an official provider, like `aws` + or `azurerm` in the above examples, but that is not required and so you can + use whichever system keywords make sense for the organization of your + particular registry. 
-The `hostname/` portion of a provider address (including its slash delimiter) +The `hostname/` portion of a module address (including its slash delimiter) is optional, and if omitted defaults to `registry.terraform.io/`. For example: @@ -61,12 +63,12 @@ For example: * `example.com/awesomecorp/consul/happycloud` is a hypothetical module published on a third-party registry. -If you intend only to share a module you've developed for use by all -Terraform users, please consider publishing it into the public -[Terraform Registry](https://registry.terraform.io/), which will make your -module discoverable. You only need to implement this module registry -protocol if you wish to publish modules whose addresses include a different -hostname that is under your control. +If you intend to share a module you've developed for use by all Terraform +users, please consider publishing it into the public +[Terraform Registry](https://registry.terraform.io/) to make your module more +discoverable. You only need to implement this module registry protocol if you +wish to publish modules whose addresses include a different hostname that is +under your control. ## Module Versions @@ -80,7 +82,7 @@ blocks have the same source address. ## Service Discovery -The providers protocol begins with Terraform CLI using +The module registry protocol begins with Terraform CLI using [Terraform's remote service discovery protocol](./remote-service-discovery.html), with the hostname in the module address acting as the "User-facing Hostname". @@ -123,7 +125,7 @@ available versions for a given fully-qualified module. | Method | Path | Produces | | ------ | ------------------------------------- | -------------------------- | -| `GET` | `:namespace/:name/:provider/versions` | `application/json` | +| `GET` | `:namespace/:name/:provider/versions` | `application/json` | ### Parameters @@ -177,7 +179,7 @@ This endpoint downloads the specified version of a module for a single provider. 
| Method | Path | Produces |
| ------ | ------------------------------------------------------ | -------------------------- |
-| `GET` | `:namespace/:name/:provider/:system/:version/download` | `application/json` |
+| `GET` | `:namespace/:name/:system/:version/download` | `application/json` |

### Parameters

@@ -187,10 +189,7 @@ This endpoint downloads the specified version of a module for a single provider.

- `name` `(string: )` - The name of the module.
  This is required and is specified as part of the URL path.

-- `provider` `(string: )` - The name of the provider.
-  This is required and is specified as part of the URL path.
-
-- `system` `(string: )` - The name of the target system.
+- `system` `(string: )` - The name of the target system.
  This is required and is specified as part of the URL path.

- `version` `(string: )` - The version of the module.
@@ -214,10 +213,10 @@ A successful response has no body, and includes the location from which
the module version's source can be downloaded in the `X-Terraform-Get`
header. The value of this header accepts the same values as the `source`
argument in a `module` block in Terraform configuration, as described in
-[Module Sources](https://www.terraform.io/docs/modules/sources.html),
+[Module Sources](https://www.terraform.io/docs/language/modules/sources.html),
except that it may not recursively refer to another module registry address.

The value of `X-Terraform-Get` may instead be a relative URL, indicated by
beginning with `/`, `./` or `../`, in which case it is resolved relative to the
full URL of the download endpoint to produce
-[an HTTP URL module source](/docs/modules/sources.html#http-urls).
+[an HTTP URL module source](/docs/language/modules/sources.html#http-urls).
diff --git a/website/docs/internals/provider-network-mirror-protocol.html.md b/website/docs/internals/provider-network-mirror-protocol.html.md index 3ad4895bb..9520f8298 100644 --- a/website/docs/internals/provider-network-mirror-protocol.html.md +++ b/website/docs/internals/provider-network-mirror-protocol.html.md @@ -18,7 +18,7 @@ implement to provide an alternative installation source for Terraform providers, regardless of their origin registries. Terraform uses network mirrors only if you activate them explicitly in -[the CLI configuration's `provider_installation` block](/docs/commands/cli-config.html#provider-installation). +[the CLI configuration's `provider_installation` block](/docs/cli/config/config-file.html#provider-installation). When enabled, a network mirror can serve providers belonging to any registry hostname, which can allow an organization to serve all of the Terraform providers they intend to use from an internal server, rather than from each @@ -36,7 +36,7 @@ instead. Each Terraform provider has an associated address which uniquely identifies it within Terraform. A provider address has the syntax `hostname/namespace/type`, which is described in more detail in -[the Provider Requirements documentation](/docs/configuration/provider-requirements.html). +[the Provider Requirements documentation](/docs/language/providers/requirements.html). By default, the `hostname` portion of a provider address serves both as part of its unique identifier _and_ as the location of the registry to retrieve it @@ -96,7 +96,7 @@ base URL from the above CLI configuration example. ### Authentication If the CLI configuration includes -[credentials](/docs/commands/cli-config.html#credentials) for the hostname +[credentials](/docs/cli/config/config-file.html#credentials) for the hostname given in the network mirror base URL, Terraform will include those credentials in its requests for operations described below. 
@@ -264,7 +264,7 @@ in the appropriate nested subdirectories, and ensure that your system is configured to serve `.json` files with the `application/json` media type. As a convenience, Terraform CLI includes -[the `terraform providers mirror` subcommand](https://www.terraform.io/docs/commands/providers/mirror.html), +[the `terraform providers mirror` subcommand](https://www.terraform.io/docs/cli/commands/providers/mirror.html), which will analyze the current configuration for the providers it requires, download the packages for those providers from their origin registries, and place them into a local directory suitable for use as a mirror. diff --git a/website/docs/internals/provider-registry-protocol.html.md b/website/docs/internals/provider-registry-protocol.html.md index b81dead37..8dd4c543e 100644 --- a/website/docs/internals/provider-registry-protocol.html.md +++ b/website/docs/internals/provider-registry-protocol.html.md @@ -42,7 +42,7 @@ where: * `hostname` is the registry host that the provider is considered to have originated from, and the default location Terraform will consult for information about the provider - [unless overridden in the CLI configuration](/docs/commands/cli-config.html#provider-installation). + [unless overridden in the CLI configuration](/docs/cli/config/config-file.html#provider-installation). * `namespace` is the name of a namespace, unique on a particular hostname, that can contain one or more providers that are somehow related. On the public Terraform Registry the "namespace" represents the organization that is @@ -78,7 +78,7 @@ to see it as an entirely separate provider that will _not_ be usable by modules that declare a dependency on `hashicorp/azurerm`. 
If your goal is to create an alternative local distribution source for an existing provider -- that is, a _mirror_ of the provider -- refer to -[the provider installation method configuration](/docs/commands/cli-config.html#provider-installation) +[the provider installation method configuration](/docs/cli/config/config-file.html#provider-installation) instead. ## Provider Versions diff --git a/website/docs/internals/remote-service-discovery.html.md b/website/docs/internals/remote-service-discovery.html.md index 5a2ecb336..f82617e7a 100644 --- a/website/docs/internals/remote-service-discovery.html.md +++ b/website/docs/internals/remote-service-discovery.html.md @@ -85,14 +85,14 @@ version 1 of the module registry protocol: At present, the following service identifiers are in use: -* `login.v1`: [login protocol version 1](/docs/commands/login.html#protocol-v1) +* `login.v1`: [login protocol version 1](/docs/cli/commands/login.html) * `modules.v1`: [module registry API version 1](module-registry-protocol.html) * `providers.v1`: [provider registry API version 1](provider-registry-protocol.html) ## Authentication If credentials for the given hostname are available in -[the CLI config](/docs/commands/cli-config.html) then they will be included +[the CLI config](/docs/cli/config/config-file.html) then they will be included in the request for the discovery document. 
The credentials may also be provided to endpoints declared in the discovery diff --git a/website/docs/configuration/attr-as-blocks.html.md b/website/docs/language/attr-as-blocks.html.md similarity index 93% rename from website/docs/configuration/attr-as-blocks.html.md rename to website/docs/language/attr-as-blocks.html.md index 5a2dd37a1..ad7759a25 100644 --- a/website/docs/configuration/attr-as-blocks.html.md +++ b/website/docs/language/attr-as-blocks.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Attributes as Blocks - Configuration Language" sidebar_current: "docs-config-attr-as-blocks" description: |- @@ -25,14 +25,14 @@ is set to an empty list (` = []`). Most users do not need to know any further details of this "nested block or empty list" behavior. However, read further if you need to: -- Use Terraform's [JSON syntax](/docs/configuration/syntax-json.html) with this +- Use Terraform's [JSON syntax](/docs/language/syntax/json.html) with this type of resource. - Create a reusable module that wraps this type of resource. ## Details In Terraform v0.12 and later, the language makes a distinction between -[argument syntax and nested block syntax](/docs/configuration/syntax.html#arguments-and-blocks) +[argument syntax and nested block syntax](/docs/language/syntax/configuration.html#arguments-and-blocks) within blocks: * Argument syntax sets a named argument for the containing object. If the @@ -46,7 +46,7 @@ within blocks: merging in with any explicitly-defined arguments. The distinction between these is particularly important for -[JSON syntax](/docs/configuration/syntax-json.html) +[JSON syntax](/docs/language/syntax/json.html) because the same primitive JSON constructs (lists and objects) will be interpreted differently depending on whether a particular name is an argument or a nested block type. 
@@ -153,7 +153,7 @@ example = [ For the arguments that use the attributes-as-blocks usage mode, the above is a better pattern than using -[`dynamic` blocks](/docs/configuration/expressions.html#dynamic-blocks) +[`dynamic` blocks](/docs/language/expressions/dynamic-blocks.html) because the case where the caller provides an empty list will result in explicitly assigning an empty list value, rather than assigning no value at all and thus retaining and @@ -163,7 +163,7 @@ dynamically-generating _normal_ nested blocks, though. ## In JSON syntax Arguments that use this special mode are specified in JSON syntax always using -the [JSON expression mapping](/docs/configuration/syntax-json.html#expression-mapping) +the [JSON expression mapping](/docs/language/syntax/json.html#expression-mapping) to produce a list of objects. The interpretation of these values in JSON syntax is, therefore, equivalent diff --git a/website/docs/configuration/data-sources.html.md b/website/docs/language/data-sources/index.html.md similarity index 77% rename from website/docs/configuration/data-sources.html.md rename to website/docs/language/data-sources/index.html.md index 7fa77d812..589dfa3c6 100644 --- a/website/docs/configuration/data-sources.html.md +++ b/website/docs/language/data-sources/index.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Data Sources - Configuration Language" sidebar_current: "docs-config-data-sources" description: |- @@ -8,17 +8,16 @@ description: |- # Data Sources --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Data Sources](../configuration-0-11/data-sources.html). +> **Hands-on:** Try the [Query data sources](https://learn.hashicorp.com/tutorials/terraform/data-sources?in=terraform/configuration-language&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. 
_Data sources_ allow data to be fetched or computed for use elsewhere in Terraform configuration. Use of data sources allows a Terraform configuration to make use of information defined outside of Terraform, or defined by another separate Terraform configuration. -Each [provider](./providers.html) may offer data sources -alongside its set of [resource types](./resources.html#resource-types-and-arguments). +Each [provider](/docs/language/providers/index.html) may offer data sources +alongside its set of [resource](/docs/language/resources/index.html) +types. ## Using Data Sources @@ -48,7 +47,7 @@ resource and so must be unique within a module. Within the block body (between `{` and `}`) are query constraints defined by the data source. Most arguments in this section depend on the data source, and indeed in this example `most_recent`, `owners` and `tags` are -all arguments defined specifically for [the `aws_ami` data source](/docs/providers/aws/d/ami.html). +all arguments defined specifically for [the `aws_ami` data source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami). When distinguishing from data resources, the primary kind of resource (as declared by a `resource` block) is known as a _managed resource_. Both kinds of resources @@ -64,14 +63,14 @@ Each data resource is associated with a single data source, which determines the kind of object (or objects) it reads and what query constraint arguments are available. -Each data source in turn belongs to a [provider](./providers.html), +Each data source in turn belongs to a [provider](/docs/language/providers/index.html), which is a plugin for Terraform that offers a collection of resource types and data sources that most often belong to a single cloud or on-premises infrastructure platform. 
Most of the items within the body of a `data` block are defined by and specific to the selected data source, and these arguments can make full -use of [expressions](./expressions.html) and other dynamic +use of [expressions](/docs/language/expressions/index.html) and other dynamic Terraform language features. However, there are some "meta-arguments" that are defined by Terraform itself @@ -102,9 +101,9 @@ only within Terraform itself, calculating some results and exposing them for use elsewhere. For example, local-only data sources exist for -[rendering templates](/docs/providers/template/d/file.html), -[reading local files](/docs/providers/local/d/file.html), and -[rendering AWS IAM policies](/docs/providers/aws/d/iam_policy_document.html). +[rendering templates](https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/file), +[reading local files](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file), and +[rendering AWS IAM policies](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document). The behavior of local-only data sources is the same as all other data sources, but their result data exists only temporarily during a Terraform @@ -113,17 +112,23 @@ operation, and is re-calculated each time a new plan is created. ## Data Resource Dependencies Data resources have the same dependency resolution behavior -[as defined for managed resources](./resources.html#resource-dependencies). +[as defined for managed resources](/docs/language/resources/behavior.html#resource-dependencies). Setting the `depends_on` meta-argument within `data` blocks defers reading of the data source until after all changes to the dependencies have been applied. 
+In order to ensure that data sources are accessing the most up to date +information possible in a wide variety of use cases, arguments directly +referencing managed resources are treated the same as if the resource was +listed in `depends_on`. This behavior can be avoided when desired by indirectly +referencing the managed resource values through a `local` value. + ~> **NOTE:** **In Terraform 0.12 and earlier**, due to the data resource behavior of deferring the read until the apply phase when depending on values that are not yet known, using `depends_on` with `data` resources will force the read to always be deferred to the apply phase, and therefore a configuration that uses `depends_on` with a `data` resource can never converge. Due to this behavior, we do not recommend using `depends_on` with data resources. ## Multiple Resource Instances -Data resources support [`count`](./resources.html#count-multiple-resource-instances-by-count) -and [`for_each`](./resources.html#for_each-multiple-resource-instances-defined-by-a-map-or-set-of-strings) +Data resources support [`count`](/docs/language/meta-arguments/count.html) +and [`for_each`](/docs/language/meta-arguments/for_each.html) meta-arguments as defined for managed resources, with the same syntax and behavior. As with managed resources, when `count` or `for_each` is present it is important to @@ -133,7 +138,7 @@ own variant of the constraint arguments, producing an indexed result. ## Selecting a Non-default Provider Configuration -Data resources support [the `providers` meta-argument](./resources.html#provider-selecting-a-non-default-provider-configuration) +Data resources support [the `provider` meta-argument](/docs/language/meta-arguments/resource-provider.html) as defined for managed resources, with the same syntax and behavior. 
## Lifecycle Customizations @@ -187,13 +192,15 @@ resource "aws_instance" "web" { ## Meta-Arguments As data sources are essentially a read only subset of resources, they also -support the same [meta-arguments](./resources.html#meta-arguments) of resources +support the same [meta-arguments](/docs/language/resources/syntax.html#meta-arguments) of resources with the exception of the -[`lifecycle` configuration block](./resources.html#lifecycle-lifecycle-customizations). +[`lifecycle` configuration block](/docs/language/meta-arguments/lifecycle.html). ### Non-Default Provider Configurations -Similarly to [resources](./resources.html), when a module has multiple configurations for the same provider you can specify which configuration to use with the `provider` meta-argument: +Similarly to [resources](/docs/language/resources/index.html), when +a module has multiple configurations for the same provider you can specify which +configuration to use with the `provider` meta-argument: ```hcl data "aws_ami" "web" { @@ -204,7 +211,7 @@ data "aws_ami" "web" { ``` See -[Resources: Selecting a Non-Default Provider Configuration](./resources.html#provider-selecting-a-non-default-provider-configuration) +[The Resource `provider` Meta-Argument](/docs/language/meta-arguments/resource-provider.html) for more information. 
## Data Source Lifecycle diff --git a/website/docs/configuration/dependency-lock.html.md b/website/docs/language/dependency-lock.html.md similarity index 84% rename from website/docs/configuration/dependency-lock.html.md rename to website/docs/language/dependency-lock.html.md index fc7a1413d..4f30a68ce 100644 --- a/website/docs/configuration/dependency-lock.html.md +++ b/website/docs/language/dependency-lock.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Dependency Lock File (.terraform.lock.hcl) - Configuration Language" --- @@ -9,14 +9,16 @@ page_title: "Dependency Lock File (.terraform.lock.hcl) - Configuration Language versions of Terraform did not track dependency selections at all, so the information here is not relevant to those versions. +> **Hands-on:** Try the [Lock and Upgrade Provider Versions](https://learn.hashicorp.com/tutorials/terraform/provider-versioning?in=terraform/configuration-language&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. + A Terraform configuration may refer to two different kinds of external dependency that come from outside of its own codebase: -* [Providers](./provider-requirements.html), which are plugins for Terraform +* [Providers](/docs/language/providers/requirements.html), which are plugins for Terraform that extend it with support for interacting with various external systems. -* [Modules](./modules.html), which allow splitting out groups of Terraform - configuration constructs (written in the Terraform language) into reusable - abstractions. +* [Modules](/docs/language/modules/index.html), which allow + splitting out groups of Terraform configuration constructs (written in the + Terraform language) into reusable abstractions. Both of these dependency types can be published and updated independently from Terraform itself and from the configurations that depend on them. 
For that @@ -24,7 +26,7 @@ reason, Terraform must determine which versions of those dependencies are potentially compatible with the current configuration and which versions are currently selected for use. -[Version constraints](./version-constraints.html) within the configuration +[Version constraints](/docs/language/expressions/version-constraints.html) within the configuration itself determine which versions of dependencies are _potentially_ compatible, but after selecting a specific version of each dependency Terraform remembers the decisions it made in a _dependency lock file_ so that it can (by default) @@ -49,7 +51,7 @@ to signify that it is a lock file for various items that Terraform caches in the `.terraform` subdirectory of your working directory. Terraform automatically creates or updates the dependency lock file each time -you run [the `terraform init` command](/docs/commands/init.html). You should +you run [the `terraform init` command](/docs/cli/commands/init.html). You should include this file in your version control repository so that you can discuss potential changes to your external dependencies via code review, just as you would discuss potential changes to your configuration itself. @@ -121,12 +123,12 @@ There are two special considerations with the "trust on first use" model: your current platform _and_ any other packages that might be available for other platforms. - In this case, the `terraform init` output will include the fingerprint of - the key that signed the checksums, with a message like - `(signed by a HashiCorp partner, key ID DC9FC6B1FCE47986)`. You may wish to - confirm that you trust the holder of the given key before committing the - lock file containing the signed checksums, or to retrieve and verify the - full set of available packages for the given provider version. 
+ In this case, the `terraform init` output will include the fingerprint of + the key that signed the checksums, with a message like + `(signed by a HashiCorp partner, key ID DC9FC6B1FCE47986)`. You may wish to + confirm that you trust the holder of the given key before committing the + lock file containing the signed checksums, or to retrieve and verify the + full set of available packages for the given provider version. * If you install a provider for the first time using an alternative installation method, such as a filesystem or network mirror, Terraform will @@ -135,12 +137,12 @@ There are two special considerations with the "trust on first use" model: for other platforms and so the configuration will not be usable on any other platform. - To avoid this problem you can pre-populate checksums for a variety of - different platforms in your lock file using - [the `terraform providers lock` command](/docs/commands/providers/lock.html), - which will then allow future calls to `terraform init` to verify that the - packages available in your chosen mirror match the official packages from - the provider's origin registry. + To avoid this problem you can pre-populate checksums for a variety of + different platforms in your lock file using + [the `terraform providers lock` command](/docs/cli/commands/providers/lock.html), + which will then allow future calls to `terraform init` to verify that the + packages available in your chosen mirror match the official packages from + the provider's origin registry. ## Understanding Lock File Changes @@ -155,7 +157,7 @@ propsed changes. The following sections will describe these common situations. 
### Dependency on a new provider If you add a new entry to the -[provider requirements](./provider-requirements.html) for any module in your +[provider requirements](/docs/language/providers/requirements.html) for any module in your configuration, or if you add an external module that includes a new provider dependency itself, `terraform init` will respond to that by selecting the newest version of that provider which meets all of the version constraints @@ -168,7 +170,7 @@ block in the dependency lock file. @@ -6,6 +6,26 @@ ] } - + +provider "registry.terraform.io/hashicorp/azurerm" { + version = "2.30.0" + constraints = "~> 2.12" @@ -219,7 +221,7 @@ block to reflect that change. +++ .terraform.lock.hcl 2020-10-07 16:43:42.785665945 -0700 @@ -7,22 +7,22 @@ } - + provider "registry.terraform.io/hashicorp/azurerm" { - version = "2.1.0" - constraints = "~> 2.1.0" @@ -298,27 +300,27 @@ The two hashing schemes currently supported are: part of the Terraform provider registry protocol and is therefore used for providers that you install directly from an origin registry. - This hashing scheme captures a SHA256 hash of each of the official `.zip` - packages indexed in the origin registry. This is an effective scheme for - verifying the official release packages when installed from a registry, but - it's not suitable for verifying packages that come from other - [provider installation methods](/docs/commands/cli-config.html#provider-installation), - such as filesystem mirrors using the unpacked directory layout. + This hashing scheme captures a SHA256 hash of each of the official `.zip` + packages indexed in the origin registry. This is an effective scheme for + verifying the official release packages when installed from a registry, but + it's not suitable for verifying packages that come from other + [provider installation methods](/docs/cli/config/config-file.html#provider-installation), + such as filesystem mirrors using the unpacked directory layout. 
* `h1:`: a mnemonic for "hash scheme 1", which is the current preferred hashing scheme. - Hash scheme 1 is also a SHA256 hash, but is one computed from the _contents_ - of the provider distribution package, rather than of the `.zip` archive - it's contained within. This scheme therefore has the advantage that it can - be calculated for an official `.zip` file, an unpacked directory with the - same contents, or a recompressed `.zip` file which contains the same files - but potentially different metadata or compression schemes. + Hash scheme 1 is also a SHA256 hash, but is one computed from the _contents_ + of the provider distribution package, rather than of the `.zip` archive + it's contained within. This scheme therefore has the advantage that it can + be calculated for an official `.zip` file, an unpacked directory with the + same contents, or a recompressed `.zip` file which contains the same files + but potentially different metadata or compression schemes. - Due to the limited scope of the `zh:` scheme, Terraform will - opportunistically add in the corresponding `h1:` checksums as it learns - of them, which is what caused the addition of a second `h1:` checksum - in the example change shown above. + Due to the limited scope of the `zh:` scheme, Terraform will + opportunistically add in the corresponding `h1:` checksums as it learns + of them, which is what caused the addition of a second `h1:` checksum + in the example change shown above. Terraform will add a new hash to an existing provider only if the hash is calculated from a package that _also_ matches one of the existing hashes. 
In @@ -343,7 +345,7 @@ your configuration on new target platforms, or if you are installing providers from a mirror that therefore can't provide official signed checksums, you can ask Terraform to pre-populate hashes for a chosen set of platforms using -[the `terraform providers lock` command](/docs/commands/providers/lock.html): +[the `terraform providers lock` command](/docs/cli/commands/providers/lock.html): ``` terraform providers lock \ diff --git a/website/docs/language/expressions/conditionals.html.md b/website/docs/language/expressions/conditionals.html.md new file mode 100644 index 000000000..138e2b0e5 --- /dev/null +++ b/website/docs/language/expressions/conditionals.html.md @@ -0,0 +1,68 @@ +--- +layout: "language" +page_title: "Conditional Expressions - Configuration Language" +--- + +# Conditional Expressions + +> **Hands-on:** Try the [Create Dynamic Expressions](https://learn.hashicorp.com/tutorials/terraform/expressions?in=terraform/configuration-language&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. + +A _conditional expression_ uses the value of a bool expression to select one of +two values. + +The syntax of a conditional expression is as follows: + +```hcl +condition ? true_val : false_val +``` + +If `condition` is `true` then the result is `true_val`. If `condition` is +`false` then the result is `false_val`. + +A common use of conditional expressions is to define defaults to replace +invalid values: + +``` +var.a != "" ? var.a : "default-a" +``` + +If `var.a` is an empty string then the result is `"default-a"`, but otherwise +it is the actual value of `var.a`. + +## Conditions + +The condition can be any expression that resolves to a boolean value. This will +usually be an expression that uses the equality, comparison, or logical +operators. 
+ +## Result Types + +The two result values may be of any type, but they must both +be of the _same_ type so that Terraform can determine what type the whole +conditional expression will return without knowing the condition value. + +If the two result expressions don't produce the same type then Terraform will +attempt to find a type that they can both convert to, and make those +conversions automatically if so. + +For example, the following expression is valid and will always return a string, +because in Terraform all numbers can convert automatically to a string using +decimal digits: + +```hcl +var.example ? 12 : "hello" +``` + +Relying on this automatic conversion behavior can be confusing for those who +are not familiar with Terraform's conversion rules though, so we recommend +being explicit using type conversion functions in any situation where there may +be some uncertainty about the expected result type. + +The following example is contrived because it would be easier to write the +constant `"12"` instead of the type conversion in this case, but shows how to +use [`tostring`](/docs/language/functions/tostring.html) to explicitly convert a number to +a string. + +```hcl +var.example ? tostring(12) : "hello" +``` diff --git a/website/docs/language/expressions/dynamic-blocks.html.md b/website/docs/language/expressions/dynamic-blocks.html.md new file mode 100644 index 000000000..b6117b4a0 --- /dev/null +++ b/website/docs/language/expressions/dynamic-blocks.html.md @@ -0,0 +1,154 @@ +--- +layout: "language" +page_title: "Dynamic Blocks - Configuration Language" +--- + + +# `dynamic` Blocks + +Within top-level block constructs like resources, expressions can usually be +used only when assigning a value to an argument using the `name = expression` +form. 
This covers many uses, but some resource types include repeatable _nested +blocks_ in their arguments, which typically represent separate objects that +are related to (or embedded within) the containing object: + +```hcl +resource "aws_elastic_beanstalk_environment" "tfenvtest" { + name = "tf-test-name" # can use expressions here + + setting { + # but the "setting" block is always a literal block + } +} +``` + +You can dynamically construct repeatable nested blocks like `setting` using a +special `dynamic` block type, which is supported inside `resource`, `data`, +`provider`, and `provisioner` blocks: + +```hcl +resource "aws_elastic_beanstalk_environment" "tfenvtest" { + name = "tf-test-name" + application = "${aws_elastic_beanstalk_application.tftest.name}" + solution_stack_name = "64bit Amazon Linux 2018.03 v2.11.4 running Go 1.12.6" + + dynamic "setting" { + for_each = var.settings + content { + namespace = setting.value["namespace"] + name = setting.value["name"] + value = setting.value["value"] + } + } +} +``` + +A `dynamic` block acts much like a [`for` expression](for.html), but produces +nested blocks instead of a complex typed value. It iterates over a given +complex value, and generates a nested block for each element of that complex +value. + +- The label of the dynamic block (`"setting"` in the example above) specifies + what kind of nested block to generate. +- The `for_each` argument provides the complex value to iterate over. +- The `iterator` argument (optional) sets the name of a temporary variable + that represents the current element of the complex value. If omitted, the name + of the variable defaults to the label of the `dynamic` block (`"setting"` in + the example above). +- The `labels` argument (optional) is a list of strings that specifies the block + labels, in order, to use for each generated block. You can use the temporary + iterator variable in this value. +- The nested `content` block defines the body of each generated block. 
You can + use the temporary iterator variable inside this block. + +Since the `for_each` argument accepts any collection or structural value, +you can use a `for` expression or splat expression to transform an existing +collection. + +The iterator object (`setting` in the example above) has two attributes: + +* `key` is the map key or list element index for the current element. If the + `for_each` expression produces a _set_ value then `key` is identical to + `value` and should not be used. +* `value` is the value of the current element. + +A `dynamic` block can only generate arguments that belong to the resource type, +data source, provider or provisioner being configured. It is _not_ possible +to generate meta-argument blocks such as `lifecycle` and `provisioner` +blocks, since Terraform must process these before it is safe to evaluate +expressions. + +The `for_each` value must be a map or set with one element per desired +nested block. If you need to declare resource instances based on a nested +data structure or combinations of elements from multiple data structures you +can use Terraform expressions and functions to derive a suitable value. +For some common examples of such situations, see the +[`flatten`](/docs/language/functions/flatten.html) +and +[`setproduct`](/docs/language/functions/setproduct.html) +functions. + +## Multi-level Nested Block Structures + +Some providers define resource types that include multiple levels of blocks +nested inside one another. You can generate these nested structures dynamically +when necessary by nesting `dynamic` blocks in the `content` portion of other +`dynamic` blocks. 
+ +For example, a module might accept a complex data structure like the following: + +```hcl +variable "load_balancer_origin_groups" { + type = map(object({ + origins = set(object({ + hostname = string + })) + })) +} +``` + +If you were defining a resource whose type expects a block for each origin +group and then nested blocks for each origin within a group, you could ask +Terraform to generate that dynamically using the following nested `dynamic` +blocks: + +```hcl + dynamic "origin_group" { + for_each = var.load_balancer_origin_groups + content { + name = origin_group.key + + dynamic "origin" { + for_each = origin_group.value.origins + content { + hostname = origin.value.hostname + } + } + } + } +``` + +When using nested `dynamic` blocks it's particularly important to pay attention +to the iterator symbol for each block. In the above example, +`origin_group.value` refers to the current element of the outer block, while +`origin.value` refers to the current element of the inner block. + +If a particular resource type defines nested blocks that have the same type +name as one of their parents, you can use the `iterator` argument in each of +`dynamic` blocks to choose a different iterator symbol that makes the two +easier to distinguish. + +## Best Practices for `dynamic` Blocks + +Overuse of `dynamic` blocks can make configuration hard to read and maintain, so +we recommend using them only when you need to hide details in order to build a +clean user interface for a re-usable module. Always write nested blocks out +literally where possible. + +If you find yourself defining most or all of a `resource` block's arguments and +nested blocks using directly-corresponding attributes from an input variable +then that might suggest that your module is not creating a useful abstraction. +It may be better for the calling module to define the resource itself then +pass information about it into your module. 
For more information on this design +tradeoff, see [When to Write a Module](/docs/language/modules/develop/index.html#when-to-write-a-module) +and [Module Composition](/docs/language/modules/develop/composition.html). diff --git a/website/docs/language/expressions/for.html.md b/website/docs/language/expressions/for.html.md new file mode 100644 index 000000000..77ba0a3cd --- /dev/null +++ b/website/docs/language/expressions/for.html.md @@ -0,0 +1,209 @@ +--- +layout: "language" +page_title: "For Expressions - Configuration Language" +--- + +# `for` Expressions + +A _`for` expression_ creates a complex type value by transforming +another complex type value. Each element in the input value +can correspond to either one or zero values in the result, and an arbitrary +expression can be used to transform each input element into an output element. + +For example, if `var.list` were a list of strings, then the following expression +would produce a tuple of strings with all-uppercase letters: + +```hcl +[for s in var.list : upper(s)] +``` + +This `for` expression iterates over each element of `var.list`, and then +evaluates the expression `upper(s)` with `s` set to each respective element. +It then builds a new tuple value with all of the results of executing that +expression in the same order. + +## Input Types + +A `for` expression's input (given after the `in` keyword) can be a list, +a set, a tuple, a map, or an object. + +The above example showed a `for` expression with only a single temporary +symbol `s`, but a `for` expression can optionally declare a pair of temporary +symbols in order to use the key or index of each item too: + +```hcl +[for k, v in var.map : length(k) + length(v)] +``` + +For a map or object type, like above, the `k` symbol refers to the key or +attribute name of the current element. 
You can also use the two-symbol form
+with lists and tuples, in which case the additional symbol is the index
+of each element starting from zero, which conventionally has the symbol name
+`i` or `idx` unless it's helpful to choose a more specific name:
+
+```hcl
+[for i, v in var.list : "${i} is ${v}"]
+```
+
+The index or key symbol is always optional. If you specify only a single
+symbol after the `for` keyword then that symbol will always represent the
+_value_ of each element of the input collection.
+
+## Result Types
+
+The type of brackets around the `for` expression decides what type of result
+it produces.
+
+The above example uses `[` and `]`, which produces a tuple. If you use `{` and
+`}` instead, the result is an object and you must provide two result
+expressions that are separated by the `=>` symbol:
+
+```hcl
+{for s in var.list : s => upper(s)}
+```
+
+This expression produces an object whose attributes are the original elements
+from `var.list` and their corresponding values are the uppercase versions.
+For example, the resulting value might be as follows:
+
+```hcl
+{
+  foo = "FOO"
+  bar = "BAR"
+  baz = "BAZ"
+}
+```
+
+A `for` expression alone can only produce either an object value or a tuple
+value, but Terraform's automatic type conversion rules mean that you can
+typically use the results in locations where lists, maps, and sets are expected.
+
+## Filtering Elements
+
+A `for` expression can also include an optional `if` clause to filter elements
+from the source collection, producing a value with fewer elements than
+the source value:
+
+```
+[for s in var.list : upper(s) if s != ""]
+```
+
+One common reason for filtering collections in `for` expressions is to split
+a single source collection into two separate collections based on some
+criteria. 
For example, if the input `var.users` is a map of objects where the
+objects each have an attribute `is_admin` then you may wish to produce separate
+maps with admin vs non-admin objects:
+
+```hcl
+variable "users" {
+  type = map(object({
+    is_admin = bool
+  }))
+}
+
+locals {
+  admin_users = {
+    for name, user in var.users : name => user
+    if user.is_admin
+  }
+  regular_users = {
+    for name, user in var.users : name => user
+    if !user.is_admin
+  }
+}
+```
+
+## Element Ordering
+
+Because `for` expressions can convert from unordered types (maps, objects, sets)
+to ordered types (lists, tuples), Terraform must choose an implied ordering
+for the elements of an unordered collection.
+
+For maps and objects, Terraform sorts the elements by key or attribute name,
+using lexical sorting.
+
+For sets of strings, Terraform sorts the elements by their value, using
+lexical sorting.
+
+For sets of other types, Terraform uses an arbitrary ordering that may change
+in future versions of Terraform. For that reason, we recommend converting the
+result of such an expression to itself be a set so that it's clear elsewhere
+in the configuration that the result is unordered. You can use
+[the `toset` function](/docs/language/functions/toset.html)
+to concisely convert a `for` expression result to be of a set type.
+
+```hcl
+toset([for e in var.set : e.example])
+```
+
+## Grouping Results
+
+If the result type is an object (using `{` and `}` delimiters) then normally
+the given key expression must be unique across all elements in the result,
+or Terraform will return an error.
+
+Sometimes the resulting keys are _not_ unique, and so to support that situation
+Terraform supports a special _grouping mode_ which changes the result to support
+multiple elements per key.
+
+To activate grouping mode, add the symbol `...` after the value expression. 
+For example: + +```hcl +variable "users" { + type = map(object({ + role = string + })) +} + +locals { + users_by_role = { + for name, user in var.users : user.role => name... + } +} +``` + +The above represents a situation where a module expects a map describing +various users who each have a single "role", where the map keys are usernames. +The usernames are guaranteed unique because they are map keys in the input, +but many users may all share a single role name. + +The `local.users_by_role` expression inverts the input map so that the keys +are the role names and the values are usernames, but the expression is in +grouping mode (due to the `...` after `name`) and so the result will be a +map of lists of strings, such as the following: + +```hcl +{ + "admin": [ + "ps", + ], + "maintainer": [ + "am", + "jb", + "kl", + "ma", + ], + "viewer": [ + "st", + "zq", + ], +} +``` + +Due to [the element ordering rules](#element-ordering), Terraform will sort +the users lexically by username as part of evaluating the `for` expression, +and so the usernames associated with each role will be lexically sorted +after grouping. + +## Repeated Configuration Blocks + +The `for` expressions mechanism is for constructing collection values from +other collection values within expressions, which you can then assign to +individual resource arguments that expect complex values. + +Some resource types also define _nested block types_, which typically represent +separate objects that belong to the containing resource in some way. You can't +dynamically generate nested blocks using `for` expressions, but you _can_ +generate nested blocks for a resource dynamically using +[`dynamic` blocks](dynamic-blocks.html). 
diff --git a/website/docs/language/expressions/function-calls.html.md b/website/docs/language/expressions/function-calls.html.md new file mode 100644 index 000000000..26e230350 --- /dev/null +++ b/website/docs/language/expressions/function-calls.html.md @@ -0,0 +1,112 @@ +--- +layout: "language" +page_title: "Function Calls - Configuration Language" +--- + +# Function Calls + +> **Hands-on:** Try the [Perform Dynamic Operations with Functions](https://learn.hashicorp.com/tutorials/terraform/functions?in=terraform/configuration-language&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. + +The Terraform language has a number of +[built-in functions](/docs/language/functions/index.html) that can be used +in expressions to transform and combine values. These +are similar to the operators but all follow a common syntax: + +```hcl +<FUNCTION NAME>(<ARGUMENT 1>, <ARGUMENT 2>) +``` + +The function name specifies which function to call. Each defined function +expects a specific number of arguments with specific value types, and returns a +specific value type as a result. + +Some functions take an arbitrary number of arguments. For example, the `min` +function takes any number of numeric arguments and returns the one that is +numerically smallest: + +```hcl +min(55, 3453, 2) +``` + +A function call expression evaluates to the function's return value. + +## Available Functions + +For a full list of available functions, see +[the function reference](/docs/language/functions/index.html). + +## Expanding Function Arguments + +If the arguments to pass to a function are available in a list or tuple value, +that value can be _expanded_ into separate arguments. Provide the list value as +an argument and follow it with the `...` symbol: + +```hcl +min([55, 2453, 2]...) +``` + +The expansion symbol is three periods (`...`), not a Unicode ellipsis character +(`…`). Expansion is a special syntax that is only available in function calls. 
+ +## Using Sensitive Data as Function Arguments + +When using sensitive data, such as [an input variable](https://www.terraform.io/docs/language/values/variables.html#suppressing-values-in-cli-output) +or [an output defined](https://www.terraform.io/docs/language/values/outputs.html#sensitive-suppressing-values-in-cli-output) as sensitive +as function arguments, the result of the function call will be marked as sensitive. + +This is a conservative behavior that is true irrespective of the function being +called. For example, passing an object containing a sensitive input variable to +the `keys()` function will result in a list that is sensitive: + +```shell +> local.baz +{ + "a" = (sensitive) + "b" = "dog" +} +> keys(local.baz) +(sensitive) +``` + +## When Terraform Calls Functions + +Most of Terraform's built-in functions are, in programming language terms, +[pure functions](https://en.wikipedia.org/wiki/Pure_function). This means that +their result is based only on their arguments and so it doesn't make any +practical difference when Terraform would call them. + +However, a small subset of functions interact with outside state and so for +those it can be helpful to know when Terraform will call them in relation to +other events that occur in a Terraform run. + +The small set of special functions includes +[`file`](/docs/language/functions/file.html), +[`templatefile`](/docs/language/functions/templatefile.html), +[`timestamp`](/docs/language/functions/timestamp.html), +and [`uuid`](/docs/language/functions/uuid.html). +If you are not working with these functions then you don't need +to read this section, although the information here may still be interesting +background information. + +The `file` and `templatefile` functions are intended for reading files that +are included as a static part of the configuration and so Terraform will +execute these functions as part of initial configuration validation, before +taking any other actions with the configuration. 
That means you cannot use +either function to read files that your configuration might generate +dynamically on disk as part of the plan or apply steps. + +The `timestamp` function returns a representation of the current system time +at the point when Terraform calls it, and the `uuid` function returns a random +result which differs on each call. Without any special behavior these would +both cause the final configuration during the apply step not to match the +actions shown in the plan, which violates the Terraform execution model. + +For that reason, Terraform arranges for both of those functions to produce +[unknown value](references.html#values-not-yet-known) results during the +plan step, with the real result being decided only during the apply step. +For `timestamp` in particular, this means that the recorded time will be +the instant when Terraform began applying the change, rather than when +Terraform _planned_ the change. + +For more details on the behavior of these functions, refer to their own +documentation pages. diff --git a/website/docs/language/expressions/index.html.md b/website/docs/language/expressions/index.html.md new file mode 100644 index 000000000..5a6a18a0f --- /dev/null +++ b/website/docs/language/expressions/index.html.md @@ -0,0 +1,69 @@ +--- +layout: "language" +page_title: "Expressions - Configuration Language" +--- + +# Expressions + +> **Hands-on:** Try the [Create Dynamic Expressions](https://learn.hashicorp.com/tutorials/terraform/expressions?in=terraform/configuration-language&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. + +_Expressions_ are used to refer to or compute values within a configuration. +The simplest expressions are just literal values, like `"hello"` or `5`, +but the Terraform language also allows more complex expressions such as +references to data exported by resources, arithmetic, conditional evaluation, +and a number of built-in functions. 
+ +Expressions can be used in a number of places in the Terraform language, +but some contexts limit which expression constructs are allowed, +such as requiring a literal value of a particular type or forbidding +[references to resource attributes](/docs/language/expressions/references.html#references-to-resource-attributes). +Each language feature's documentation describes any restrictions it places on +expressions. + +You can experiment with the behavior of Terraform's expressions from +the Terraform expression console, by running +[the `terraform console` command](/docs/cli/commands/console.html). + +The other pages in this section describe the features of Terraform's +expression syntax. + +- [Types and Values](/docs/language/expressions/types.html) + documents the data types that Terraform expressions can resolve to, and the + literal syntaxes for values of those types. + +- [Strings and Templates](/docs/language/expressions/strings.html) + documents the syntaxes for string literals, including interpolation sequences + and template directives. + +- [References to Values](/docs/language/expressions/references.html) + documents how to refer to named values like variables and resource attributes. + +- [Operators](/docs/language/expressions/operators.html) + documents the arithmetic, comparison, and logical operators. + +- [Function Calls](/docs/language/expressions/function-calls.html) + documents the syntax for calling Terraform's built-in functions. + +- [Conditional Expressions](/docs/language/expressions/conditionals.html) + documents the ` ? : ` expression, which + chooses between two values based on a bool condition. + +- [For Expressions](/docs/language/expressions/for.html) + documents expressions like `[for s in var.list : upper(s)]`, which can + transform a complex type value into another complex type value. 
+ +- [Splat Expressions](/docs/language/expressions/splat.html) + documents expressions like `var.list[*].id`, which can extract simpler + collections from more complicated expressions. + +- [Dynamic Blocks](/docs/language/expressions/dynamic-blocks.html) + documents a way to create multiple repeatable nested blocks within a resource + or other construct. + +- [Type Constraints](/docs/language/expressions/type-constraints.html) + documents the syntax for referring to a type, rather than a value of that + type. Input variables expect this syntax in their `type` argument. + +- [Version Constraints](/docs/language/expressions/version-constraints.html) + documents the syntax of special strings that define a set of allowed software + versions. Terraform uses version constraints in several places. diff --git a/website/docs/language/expressions/operators.html.md b/website/docs/language/expressions/operators.html.md new file mode 100644 index 000000000..542232680 --- /dev/null +++ b/website/docs/language/expressions/operators.html.md @@ -0,0 +1,103 @@ +--- +layout: "language" +page_title: "Operators - Configuration Language" +--- + +# Arithmetic and Logical Operators + +An _operator_ is a type of expression that transforms or combines one or more +other expressions. Operators either combine two values in some way to +produce a third result value, or transform a single given value to +produce a single result. + +Operators that work on two values place an operator symbol between the two +values, similar to mathematical notation: `1 + 2`. Operators that work on +only one value place an operator symbol before that value, like +`!true`. + +The Terraform language has a set of operators for both arithmetic and logic, +which are similar to operators in programming languages such as JavaScript +or Ruby. + +When multiple operators are used together in an expression, they are evaluated +in the following order of operations: + +1. `!`, `-` (multiplication by `-1`) +1. 
`*`, `/`, `%` +1. `+`, `-` (subtraction) +1. `>`, `>=`, `<`, `<=` +1. `==`, `!=` +1. `&&` +1. `||` + +Use parentheses to override the default order of operations. Without +parentheses, higher levels will be evaluated first, so Terraform will interpret +`1 + 2 * 3` as `1 + (2 * 3)` and _not_ as `(1 + 2) * 3`. + +The different operators can be gathered into a few different groups with +similar behavior, as described below. Each group of operators expects its +given values to be of a particular type. Terraform will attempt to convert +values to the required type automatically, or will produce an error message +if automatic conversion is impossible. + +## Arithmetic Operators + +The arithmetic operators all expect number values and produce number values +as results: + +* `a + b` returns the result of adding `a` and `b` together. +* `a - b` returns the result of subtracting `b` from `a`. +* `a * b` returns the result of multiplying `a` and `b`. +* `a / b` returns the result of dividing `a` by `b`. +* `a % b` returns the remainder of dividing `a` by `b`. This operator is + generally useful only when used with whole numbers. +* `-a` returns the result of multiplying `a` by `-1`. + +Terraform supports some other less-common numeric operations as +[functions](function-calls.html). For example, you can calculate exponents +using +[the `pow` function](/docs/language/functions/pow.html). + +## Equality Operators + +The equality operators both take two values of any type and produce boolean +values as results. + +* `a == b` returns `true` if `a` and `b` both have the same type and the same + value, or `false` otherwise. +* `a != b` is the opposite of `a == b`. + +Because the equality operators require both arguments to be of exactly the +same type in order to decide equality, we recommend using these operators only +with values of primitive types or using explicit type conversion functions +to indicate which type you are intending to use for comparison. 
+ +Comparisons between structural types may produce surprising results if you +are not sure about the types of each of the arguments. For example, +`var.list == []` may seem like it would return `true` if `var.list` were an +empty list, but `[]` actually builds a value of type `tuple([])` and so the +two values can never match. In this situation it's often clearer to write +`length(var.list) == 0` instead. + +## Comparison Operators + +The comparison operators all expect number values and produce boolean values +as results. + +* `a < b` returns `true` if `a` is less than `b`, or `false` otherwise. +* `a <= b` returns `true` if `a` is less than or equal to `b`, or `false` + otherwise. +* `a > b` returns `true` if `a` is greater than `b`, or `false` otherwise. +* `a >= b` returns `true` if `a` is greater than or equal to `b`, or `false` otherwise. + +## Logical Operators + +The logical operators all expect bool values and produce bool values as results. + +* `a || b` returns `true` if either `a` or `b` is `true`, or `false` if both are `false`. +* `a && b` returns `true` if both `a` and `b` are `true`, or `false` if either one is `false`. +* `!a` returns `true` if `a` is `false`, and `false` if `a` is `true`. + +Terraform does not have an operator for the "exclusive OR" operation. If you +know that both operators are boolean values then exclusive OR is equivalent +to the `!=` ("not equal") operator. 
diff --git a/website/docs/language/expressions/references.html.md b/website/docs/language/expressions/references.html.md new file mode 100644 index 000000000..26eed0fb5 --- /dev/null +++ b/website/docs/language/expressions/references.html.md @@ -0,0 +1,359 @@ +--- +layout: "language" +page_title: "References to Values - Configuration Language" +--- + +# References to Named Values + +> **Hands-on:** Try the [Create Dynamic Expressions](https://learn.hashicorp.com/tutorials/terraform/expressions?in=terraform/configuration-language&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. + +Terraform makes several kinds of named values available. Each of these names is +an expression that references the associated value; you can use them as +standalone expressions, or combine them with other expressions to compute new +values. + +## Types of Named Values + +The main kinds of named values available in Terraform are: + +- Resources +- Input variables +- Local values +- Child module outputs +- Data sources +- Filesystem and workspace info +- Block-local values + +The sections below explain each kind of named value in detail. + +Although many of these names use dot-separated paths that resemble +[attribute notation](./types.html#indices-and-attributes) for elements of object values, they are not +implemented as real objects. This means you must use them exactly as written: +you cannot use square-bracket notation to replace the dot-separated paths, and +you cannot iterate over the "parent object" of a named entity; for example, you +cannot use `aws_instance` in a `for` expression to iterate over every AWS +instance resource. + +### Resources + +`.` represents a [managed resource](/docs/language/resources/index.html) of +the given type and name. 
+ +The value of a resource reference can vary, depending on whether the resource +uses `count` or `for_each`: + +- If the resource doesn't use `count` or `for_each`, the reference's value is an + object. The resource's attributes are elements of the object, and you can + access them using [dot or square bracket notation](./types.html#indices-and-attributes). +- If the resource has the `count` argument set, the reference's value is a + _list_ of objects representing its instances. +- If the resource has the `for_each` argument set, the reference's value is a + _map_ of objects representing its instances. + +Any named value that does not match another pattern listed below +will be interpreted by Terraform as a reference to a managed resource. + +For more information about how to use resource references, see +[references to resource attributes](#references-to-resource-attributes) below. + +### Input Variables + +`var.` is the value of the [input variable](/docs/language/values/variables.html) of the given name. + +If the variable has a type constraint (`type` argument) as part of its +declaration, Terraform will automatically convert the caller's given value +to conform to the type constraint. + +For that reason, you can safely assume that a reference using `var.` will +always produce a value that conforms to the type constraint, even if the caller +provided a value of a different type that was automatically converted. + +In particular, note that if you define a variable as being of an object type +with particular attributes then only _those specific attributes_ will be +available in expressions elsewhere in the module, even if the caller actually +passed in a value with additional attributes. You must define in the type +constraint all of the attributes you intend to use elsewhere in your module. + +### Local Values + +`local.` is the value of the [local value](/docs/language/values/locals.html) of the given name. 
+ +Local values can refer to other local values, even within the same `locals` +block, as long as you don't introduce circular dependencies. + +### Child Module Outputs + +`module.<MODULE NAME>` is a value representing the results of +[a `module` block](/docs/language/modules/syntax.html). + +If the corresponding `module` block does not have either `count` or `for_each` +set then the value will be an object with one attribute for each output value +defined in the child module. To access one of the module's +[output values](/docs/language/values/outputs.html), use `module.<MODULE NAME>.<OUTPUT NAME>`. + +If the corresponding `module` uses `for_each` then the value will be a map +of objects whose keys correspond with the keys in the `for_each` expression, +and whose values are each objects with one attribute for each output value +defined in the child module, each representing one module instance. + +If the corresponding module uses `count` then the result is similar to the +`for_each` case except that the value is a _list_ with the requested number of +elements, each one representing one module instance. + +### Data Sources + +`data.<DATA TYPE>.<NAME>` is an object representing a +[data resource](/docs/language/data-sources/index.html) of the given data +source type and name. If the resource has the `count` argument set, the value +is a list of objects representing its instances. If the resource has the `for_each` +argument set, the value is a map of objects representing its instances. + +For more information, see +[References to Resource Attributes](#references-to-resource-attributes), which +also applies to data resources aside from the addition of the `data.` prefix +to mark the reference as for a data resource. + +### Filesystem and Workspace Info + +* `path.module` is the filesystem path of the module where the expression + is placed. +* `path.root` is the filesystem path of the root module of the configuration. +* `path.cwd` is the filesystem path of the current working directory. 
In + normal use of Terraform this is the same as `path.root`, but some advanced + uses of Terraform run it from a directory other than the root module + directory, causing these paths to be different. +* `terraform.workspace` is the name of the currently selected + [workspace](/docs/language/state/workspaces.html). + +Use the values in this section carefully, because they include information +about the context in which a configuration is being applied and so may +inadvertently hurt the portability or composability of a module. + +For example, if you use `path.cwd` directly to populate a path into a resource +argument then later applying the same configuration from a different directory +or on a different computer with a different directory structure will cause +the provider to consider the change of path to be a change to be applied, even +if the path still refers to the same file. + +Similarly, if you use any of these values as a form of namespacing in a shared +module, such as using `terraform.workspace` as a prefix for globally-unique +object names, it may not be possible to call your module more than once in +the same configuration. + +Aside from `path.module`, we recommend using the values in this section only +in the root module of your configuration. If you are writing a shared module +which needs a prefix to help create unique names, define an input variable +for your module and allow the calling module to define the prefix. The +calling module can then use `terraform.workspace` to define it if appropriate, +or some other value if not: + +```hcl +module "example" { + # ... + + name_prefix = "app-${terraform.workspace}" +} +``` + +### Block-Local Values + +Within the bodies of certain blocks, or in some other specific contexts, +there are other named values available beyond the global values listed above. +These local names are described in the documentation for the specific contexts +where they appear. 
Some of most common local names are: + +- `count.index`, in resources that use + [the `count` meta-argument](/docs/language/meta-arguments/count.html). +- `each.key` / `each.value`, in resources that use + [the `for_each` meta-argument](/docs/language/meta-arguments/for_each.html). +- `self`, in [provisioner](/docs/language/resources/provisioners/syntax.html) and + [connection](/docs/language/resources/provisioners/connection.html) blocks. + +-> **Note:** Local names are often referred to as _variables_ or +_temporary variables_ in their documentation. These are not [input +variables](/docs/language/values/variables.html); they are just arbitrary names +that temporarily represent a value. + +The names in this section relate to top-level configuration blocks only. +If you use [`dynamic` blocks](dynamic-blocks.html) to dynamically generate +resource-type-specific _nested_ blocks within `resource` and `data` blocks then +you'll refer to the key and value of each element differently. See the +`dynamic` blocks documentation for details. + +## Named Values and Dependencies + +Constructs like resources and module calls often use references to named values +in their block bodies, and Terraform analyzes these expressions to automatically +infer dependencies between objects. For example, an expression in a resource +argument that refers to another managed resource creates an implicit dependency +between the two resources. + +## References to Resource Attributes + +The most common reference type is a reference to an attribute of a resource +which has been declared either with a `resource` or `data` block. Because +the contents of such blocks can be quite complicated themselves, expressions +referring to these contents can also be complicated. 
+ +Consider the following example resource block: + +```hcl +resource "aws_instance" "example" { + ami = "ami-abc123" + instance_type = "t2.micro" + + ebs_block_device { + device_name = "sda2" + volume_size = 16 + } + ebs_block_device { + device_name = "sda3" + volume_size = 20 + } +} +``` + +The documentation for [`aws_instance`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance) +lists all of the arguments and nested blocks supported for this resource type, +and also lists a number of attributes that are _exported_ by this resource +type. All of these different resource type schema constructs are available +for use in references, as follows: + +* The `ami` argument set in the configuration can be used elsewhere with + the reference expression `aws_instance.example.ami`. +* The `id` attribute exported by this resource type can be read using the + same syntax, giving `aws_instance.example.id`. +* The arguments of the `ebs_block_device` nested blocks can be accessed using + a [splat expression](./splat.html). For example, to obtain a list of + all of the `device_name` values, use + `aws_instance.example.ebs_block_device[*].device_name`. +* The nested blocks in this particular resource type do not have any exported + attributes, but if `ebs_block_device` were to have a documented `id` + attribute then a list of them could be accessed similarly as + `aws_instance.example.ebs_block_device[*].id`. +* Sometimes nested blocks are defined as taking a logical key to identify each + block, which serves a similar purpose as the resource's own name by providing + a convenient way to refer to that single block in expressions. 
If `aws_instance` + had a hypothetical nested block type `device` that accepted such a key, it + would look like this in configuration: + + ```hcl + device "foo" { + size = 2 + } + device "bar" { + size = 4 + } + ``` + + Arguments inside blocks with _keys_ can be accessed using index syntax, such + as `aws_instance.example.device["foo"].size`. + + To obtain a map of values of a particular argument for _labelled_ nested + block types, use a [`for` expression](./for.html): + `{for k, device in aws_instance.example.device : k => device.size}`. + +When a resource has the +[`count`](/docs/language/meta-arguments/count.html) +argument set, the resource itself becomes a _list_ of instance objects rather than +a single object. In that case, access the attributes of the instances using +either [splat expressions](./splat.html) or index syntax: + +* `aws_instance.example[*].id` returns a list of all of the ids of each of the + instances. +* `aws_instance.example[0].id` returns just the id of the first instance. + +When a resource has the +[`for_each`](/docs/language/meta-arguments/for_each.html) +argument set, the resource itself becomes a _map_ of instance objects rather than +a single object, and attributes of instances must be specified by key, or can +be accessed using a [`for` expression](./for.html). + +* `aws_instance.example["a"].id` returns the id of the "a"-keyed resource. +* `[for value in aws_instance.example: value.id]` returns a list of all of the ids + of each of the instances. + +Note that unlike `count`, splat expressions are _not_ directly applicable to resources managed with `for_each`, as splat expressions must act on a list value. 
However, you can use the `values()` function to extract the instances as a list and use that list value in a splat expression: + +* `values(aws_instance.example)[*].id` + +### Values Not Yet Known + +When Terraform is planning a set of changes that will apply your configuration, +some resource attribute values cannot be populated immediately because their +values are decided dynamically by the remote system. For example, if a +particular remote object type is assigned a generated unique id on creation, +Terraform cannot predict the value of this id until the object has been created. + +To allow expressions to still be evaluated during the plan phase, Terraform +uses special "unknown value" placeholders for these results. In most cases you +don't need to do anything special to deal with these, since the Terraform +language automatically handles unknown values during expressions, so that +for example adding a known value to an unknown value automatically produces +an unknown value as the result. + +However, there are some situations where unknown values _do_ have a significant +effect: + +* The `count` meta-argument for resources cannot be unknown, since it must + be evaluated during the plan phase to determine how many instances are to + be created. + +* If unknown values are used in the configuration of a data resource, that + data resource cannot be read during the plan phase and so it will be deferred + until the apply phase. In this case, the results of the data resource will + _also_ be unknown values. + +* If an unknown value is assigned to an argument inside a `module` block, + any references to the corresponding input variable within the child module + will use that unknown value. + +* If an unknown value is used in the `value` argument of an output value, + any references to that output value in the parent module will use that + unknown value. 
+ +* Terraform will attempt to validate that unknown values are of suitable + types where possible, but incorrect use of such values may not be detected + until the apply phase, causing the apply to fail. + +Unknown values appear in the `terraform plan` output as `(not yet known)`. + +### Sensitive Resource Attributes + +When defining the schema for a resource type, a provider developer can mark +certain attributes as _sensitive_, in which case Terraform will show a +placeholder marker `(sensitive)` instead of the actual value when rendering +a plan involving that attribute. + +The treatment of these particular sensitive values is currently different than +for values in +[input variables](/docs/language/values/variables.html) +and +[output values](/docs/language/values/outputs.html) +that have `sensitive = true` set. Sensitive resource attributes will be +obscured in the plan when they appear directly, but other values that you +_derive_ from a sensitive resource attribute will not themselves be considered +sensitive, and so Terraform will include those derived values in its output +without redacting them. + +Terraform v0.14.0 and later has an +[experimental feature](/docs/language/settings/index.html#experimental-language-features) +to treat resource attributes that are marked as sensitive in the same way as +sensitive input variables and output values, so that Terraform will consider +any derived values as sensitive too. You can activate that experiment for your +module using the `provider_sensitive_attrs` experiment keyword: + +```hcl +terraform { + experiments = [provider_sensitive_attrs] +} +``` + +The behavior of this experiment might change even in future patch releases of +Terraform, so we don't recommend using this experiment in modules you use +to describe production infrastructure. 
+ +If you enable this experiment and you have exported any sensitive resource +attributes via your module's output values then you will see an error unless +you also mark the output value as `sensitive = true`, confirming your intent +to export it. diff --git a/website/docs/language/expressions/splat.html.md b/website/docs/language/expressions/splat.html.md new file mode 100644 index 000000000..731aa767f --- /dev/null +++ b/website/docs/language/expressions/splat.html.md @@ -0,0 +1,128 @@ +--- +layout: "language" +page_title: "Splat Expressions - Configuration Language" +--- + +# Splat Expressions + +> **Hands-on:** Try the [Create Dynamic Expressions](https://learn.hashicorp.com/tutorials/terraform/expressions?in=terraform/configuration-language&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. + +A _splat expression_ provides a more concise way to express a common +operation that could otherwise be performed with a `for` expression. + +If `var.list` is a list of objects that all have an attribute `id`, then +a list of the ids could be produced with the following `for` expression: + +```hcl +[for o in var.list : o.id] +``` + +This is equivalent to the following _splat expression:_ + +```hcl +var.list[*].id +``` + +The special `[*]` symbol iterates over all of the elements of the list given +to its left and accesses from each one the attribute name given on its +right. A splat expression can also be used to access attributes and indexes +from lists of complex types by extending the sequence of operations to the +right of the symbol: + +```hcl +var.list[*].interfaces[0].name +``` + +The above expression is equivalent to the following `for` expression: + +```hcl +[for o in var.list : o.interfaces[0].name] +``` + +## Splat Expressions with Maps + +The splat expression patterns shown above apply only to lists, sets, and +tuples. 
To get a similar result with a map or object value you must use
+[`for` expressions](for.html).
+
+Resources that use the `for_each` argument will appear in expressions as a map
+of objects, so you can't use splat expressions with those resources.
+For more information, see
+[Referring to Resource Instances](/docs/language/meta-arguments/for_each.html#referring-to-instances).
+
+## Single Values as Lists
+
+Splat expressions have a special behavior when you apply them to a value that
+isn't a list, set, or tuple.
+
+If the value is anything other than a null value then the splat expression will
+transform it into a single-element list, or more accurately a single-element
+tuple value. If the value is _null_ then the splat expression will return an
+empty tuple.
+
+This special behavior can be useful for modules that accept optional input
+variables whose default value is `null` to represent the absence of any value,
+to adapt the variable value to work with other Terraform language features that
+are designed to work with collections. For example:
+
+```
+variable "website" {
+  type = object({
+    index_document = string
+    error_document = string
+  })
+  default = null
+}
+
+resource "aws_s3_bucket" "example" {
+  # ...
+
+  dynamic "website" {
+    for_each = var.website[*]
+    content {
+      index_document = website.value.index_document
+      error_document = website.value.error_document
+    }
+  }
+}
+```
+
+The above example uses a [`dynamic` block](dynamic-blocks.html), which
+generates zero or more nested blocks based on a collection value. The input
+variable `var.website` is defined as a single object that might be null,
+so the `dynamic` block's `for_each` expression uses `[*]` to ensure that
+there will be one block if the module caller sets the website argument, or
+zero blocks if the caller leaves it set to null.
+ +This special behavior of splat expressions is not obvious to an unfamiliar +reader, so we recommend using it only in `for_each` arguments and similar +situations where the context implies working with a collection. Otherwise, +the meaning of the expression may be unclear to future readers. + +## Legacy (Attribute-only) Splat Expressions + +Earlier versions of the Terraform language had a slightly different version +of splat expressions, which Terraform continues to support for backward +compatibility. This older variant is less useful than the modern form described +above, and so we recommend against using it in new configurations. + +The legacy "attribute-only" splat expressions use the sequence `.*`, instead of +`[*]`: + +``` +var.list.*.interfaces[0].name +``` + +This form has a subtly different behavior, equivalent to the following +`for` expression: + +``` +[for o in var.list : o.interfaces][0].name +``` + +Notice that with the attribute-only splat expression the index operation +`[0]` is applied to the result of the iteration, rather than as part of +the iteration itself. Only the attribute lookups apply to each element of +the input. This limitation was confusing some people using older versions of +Terraform and so we recommend always using the new-style splat expressions, +with `[*]`, to get the more consistent behavior. diff --git a/website/docs/language/expressions/strings.html.md b/website/docs/language/expressions/strings.html.md new file mode 100644 index 000000000..72cd0bba0 --- /dev/null +++ b/website/docs/language/expressions/strings.html.md @@ -0,0 +1,222 @@ +--- +layout: "language" +page_title: "Strings and Templates - Configuration Language" +--- + +# Strings and Templates + +String literals are the most complex kind of literal expression in +Terraform, and also the most commonly used. + +Terraform supports both a quoted syntax and a "heredoc" syntax for strings. 
+Both of these syntaxes support template sequences for interpolating values and +manipulating text. + +## Quoted Strings + +A quoted string is a series of characters delimited by straight double-quote +characters (`"`). + +``` +"hello" +``` + +### Escape Sequences + +In quoted strings, the backslash character serves as an escape +sequence, with the following characters selecting the escape behavior: + +| Sequence | Replacement | +| ------------ | ----------------------------------------------------------------------------- | +| `\n` | Newline | +| `\r` | Carriage Return | +| `\t` | Tab | +| `\"` | Literal quote (without terminating the string) | +| `\\` | Literal backslash | +| `\uNNNN` | Unicode character from the basic multilingual plane (NNNN is four hex digits) | +| `\UNNNNNNNN` | Unicode character from supplementary planes (NNNNNNNN is eight hex digits) | + +There are also two special escape sequences that do not use backslashes: + +| Sequence | Replacement | +| --- | ---- | +| `$${` | Literal `${`, without beginning an interpolation sequence. | +| `%%{` | Literal `%{`, without beginning a template directive sequence. | + +## Heredoc Strings + +Terraform also supports a "heredoc" style of string literal inspired by Unix +shell languages, which allows multi-line strings to be expressed more clearly. + +```hcl +<}`/`%{else}`/`%{endif}` directive chooses between two templates based + on the value of a bool expression: + + ```hcl + "Hello, %{ if var.name != "" }${var.name}%{ else }unnamed%{ endif }!" + ``` + + The `else` portion may be omitted, in which case the result is an empty + string if the condition expression returns `false`. 
+
+* The `%{for <NAME> in <COLLECTION>}` / `%{endfor}` directive iterates over the
+  elements of a given collection or structural value and evaluates a given
+  template once for each element, concatenating the results together:
+
+  ```hcl
+  < **Note:** This page is about Terraform 0.12 and later, and documents a
-feature that did not exist in older versions. For other information about
-Terraform 0.11 and earlier, see
-[0.11 Configuration Language](../configuration-0-11/index.html).
-
 Terraform module authors and provider developers can use detailed type
 constraints to validate user-provided values for their input variables and
 resource arguments. This requires some additional knowledge about Terraform's
@@ -32,9 +27,9 @@ function-like constructs called _type constructors._
 represent a type; instead, it represents a _kind_ of similar types.
 
 Type constraints look like other kinds of Terraform
-[expressions](./expressions.html), but are a special syntax. Within the
+[expressions](/docs/language/expressions/index.html), but are a special syntax. Within the
 Terraform language, they are only valid in the `type` argument of an
-[input variable](./variables.html).
+[input variable](/docs/language/values/variables.html).
 
@@ -160,7 +155,7 @@ like the following:
 
 The Terraform language has literal expressions for creating tuple and object
 values, which are described in
-[Expressions: Literal Expressions](./expressions.html#literal-expressions) as
+[Expressions: Literal Expressions](/docs/language/expressions/types.html#literal-expressions) as
 "list/tuple" literals and "map/object" literals, respectively.
 
 Terraform does _not_ provide any way to directly represent lists, maps, or
 sets.
diff --git a/website/docs/language/expressions/types.html.md b/website/docs/language/expressions/types.html.md new file mode 100644 index 000000000..c197a7f8e --- /dev/null +++ b/website/docs/language/expressions/types.html.md @@ -0,0 +1,147 @@ +--- +layout: "language" +page_title: "Types and Values - Configuration Language" +--- + +# Types and Values + +The result of an expression is a _value_. All values have a _type_, which +dictates where that value can be used and what transformations can be +applied to it. + +## Types + +The Terraform language uses the following types for its values: + +* `string`: a sequence of Unicode characters representing some text, like + `"hello"`. +* `number`: a numeric value. The `number` type can represent both whole + numbers like `15` and fractional values like `6.283185`. +* `bool`: a boolean value, either `true` or `false`. `bool` values can be used in conditional + logic. +* `list` (or `tuple`): a sequence of values, like + `["us-west-1a", "us-west-1c"]`. Elements in a list or tuple are identified by + consecutive whole numbers, starting with zero. +* `map` (or `object`): a group of values identified by named labels, like + `{name = "Mabel", age = 52}`. + +Strings, numbers, and bools are sometimes called _primitive types._ Lists/tuples and maps/objects are sometimes called _complex types,_ _structural types,_ or _collection types._ + +Finally, there is one special value that has _no_ type: + +* `null`: a value that represents _absence_ or _omission._ If you set an + argument of a resource or module to `null`, Terraform behaves as though you + had completely omitted it — it will use the argument's default value if it has + one, or raise an error if the argument is mandatory. `null` is most useful in + conditional expressions, so you can dynamically omit an argument if a + condition isn't met. + +## Literal Expressions + +A _literal expression_ is an expression that directly represents a particular +constant value. 
Terraform has a literal expression syntax for each of the value
+types described above.
+
+### Strings
+
+Strings are usually represented by a double-quoted sequence of Unicode
+characters, `"like this"`. There is also a "heredoc" syntax for more complex
+strings.
+
+String literals are the most complex kind of literal expression in
+Terraform, and have their own page of documentation. See [Strings](./strings.html)
+for information about escape sequences, the heredoc syntax, interpolation, and
+template directives.
+
+### Numbers
+
+Numbers are represented by unquoted sequences of digits with or without a
+decimal point, like `15` or `6.283185`.
+
+### Bools
+
+Bools are represented by the unquoted symbols `true` and `false`.
+
+### Null
+
+The null value is represented by the unquoted symbol `null`.
+
+### Lists/Tuples
+
+Lists/tuples are represented by a pair of square brackets containing a
+comma-separated sequence of values, like `["a", 15, true]`.
+
+List literals can be split into multiple lines for readability, but always
+require a comma between values. A comma after the final value is allowed,
+but not required. Values in a list can be arbitrary expressions.
+
+### Maps/Objects
+
+Maps/objects are represented by a pair of curly braces containing a series of
+`<KEY> = <VALUE>` pairs:
+
+```hcl
+{
+  name = "John"
+  age = 52
+}
+```
+
+Key/value pairs can be separated by either a comma or a line break.
+
+The values in a map
+can be arbitrary expressions.
+
+The keys in a map must be strings; they can be left unquoted if
+they are a valid [identifier](/docs/language/syntax/configuration.html#identifiers), but must be quoted
+otherwise. You can use a non-literal string expression as a key by wrapping it in
+parentheses, like `(var.business_unit_tag_name) = "SRE"`.
+
+## Indices and Attributes
+
+[inpage-index]: #indices-and-attributes
+
+Elements of list/tuple and map/object values can be accessed using
+the square-bracket index notation, like `local.list[3]`.
The expression within +the brackets must be a whole number for list and tuple values or a string +for map and object values. + +Map/object attributes with names that are valid identifiers can also be accessed +using the dot-separated attribute notation, like `local.object.attrname`. +In cases where a map might contain arbitrary user-specified keys, we recommend +using only the square-bracket index notation (`local.map["keyname"]`). + +## More About Complex Types + +In most situations, lists and tuples behave identically, as do maps and objects. +Whenever the distinction isn't relevant, the Terraform documentation uses each +pair of terms interchangeably (with a historical preference for "list" and +"map"). + +However, module authors and provider developers should understand the +differences between these similar types (and the related `set` type), since they +offer different ways to restrict the allowed values for input variables and +resource arguments. + +For complete details about these types (and an explanation of why the difference +usually doesn't matter), see [Type Constraints](/docs/language/expressions/type-constraints.html). + +## Type Conversion + +Expressions are most often used to set values for the arguments of resources and +child modules. In these cases, the argument has an expected type and the given +expression must produce a value of that type. + +Where possible, Terraform automatically converts values from one type to +another in order to produce the expected type. If this isn't possible, Terraform +will produce a type mismatch error and you must update the configuration with a +more suitable expression. + +Terraform automatically converts number and bool values to strings when needed. +It also converts strings to numbers or bools, as long as the string contains a +valid representation of a number or bool value. 
+ +* `true` converts to `"true"`, and vice-versa +* `false` converts to `"false"`, and vice-versa +* `15` converts to `"15"`, and vice-versa + diff --git a/website/docs/configuration/version-constraints.html.md b/website/docs/language/expressions/version-constraints.html.md similarity index 83% rename from website/docs/configuration/version-constraints.html.md rename to website/docs/language/expressions/version-constraints.html.md index 695580154..0f7ebba54 100644 --- a/website/docs/configuration/version-constraints.html.md +++ b/website/docs/language/expressions/version-constraints.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Version Constraints - Configuration Language" --- @@ -9,9 +9,9 @@ Anywhere that Terraform lets you specify a range of acceptable versions for something, it expects a specially formatted string known as a version constraint. Version constraints are used when configuring: -- [Modules](./modules.html) -- [Provider requirements](./provider-requirements.html) -- [The `required_version` setting](./terraform.html#specifying-a-required-terraform-version) in the `terraform` block. +- [Modules](/docs/language/modules/index.html) +- [Provider requirements](/docs/language/providers/requirements.html) +- [The `required_version` setting](/docs/language/settings/index.html#specifying-a-required-terraform-version) in the `terraform` block. ## Version Constraint Syntax @@ -22,7 +22,7 @@ other dependency management systems like Bundler and NPM. version = ">= 1.2.0, < 2.0.0" ``` -A version constraint is a [string literal](./expressions.html#string-literals) +A version constraint is a [string literal](/docs/language/expressions/strings.html) containing one or more conditions, which are separated by commas. Each condition consists of an operator and a version number. @@ -41,11 +41,10 @@ The following operators are valid: versions for which the comparison is true. 
"Greater-than" requests newer versions, and "less-than" requests older versions. -- `~>`: Allows the specified version, plus newer versions that only - increase the _most specific_ segment of the specified version number. For - example, `~> 0.9` is equivalent to `>= 0.9, < 1.0`, and `~> 0.8.4`, is - equivalent to `>= 0.8.4, < 0.9`. This is usually called the pessimistic - constraint operator. +- `~>`: Allows only the _rightmost_ version component to increment. For example, + to allow new patch releases within a specific minor release, use the full + version number: `~> 1.0.4` will allow installation of `1.0.5` and `1.0.10` + but not `1.1.0`. This is usually called the pessimistic constraint operator. ## Version Constraint Behavior diff --git a/website/docs/language/files/index.html.md b/website/docs/language/files/index.html.md new file mode 100644 index 000000000..109c60f73 --- /dev/null +++ b/website/docs/language/files/index.html.md @@ -0,0 +1,57 @@ +--- +layout: "language" +page_title: "Files and Directories - Configuration Language" +--- + +# Files and Directories + +## File Extension + +Code in the Terraform language is stored in plain text files with the `.tf` file +extension. There is also +[a JSON-based variant of the language](/docs/language/syntax/json.html) that is named with +the `.tf.json` file extension. + +Files containing Terraform code are often called _configuration files._ + +## Text Encoding + +Configuration files must always use UTF-8 encoding, and by convention +usually use Unix-style line endings (LF) rather than Windows-style +line endings (CRLF), though both are accepted. + +## Directories and Modules + +A _module_ is a collection of `.tf` and/or `.tf.json` files kept together in a +directory. + +A Terraform module only consists of the top-level configuration files in a +directory; nested directories are treated as completely separate modules, and +are not automatically included in the configuration. 
+ +Terraform evaluates all of the configuration files in a module, effectively +treating the entire module as a single document. Separating various blocks into +different files is purely for the convenience of readers and maintainers, and +has no effect on the module's behavior. + +A Terraform module can use [module calls](/docs/language/modules/index.html) to +explicitly include other modules into the configuration. These child modules can +come from local directories (nested in the parent module's directory, or +anywhere else on disk), or from external sources like the +[Terraform Registry](https://registry.terraform.io). + +## The Root Module + +Terraform always runs in the context of a single _root module._ A complete +_Terraform configuration_ consists of a root module and the tree of child +modules (which includes the modules called by the root module, any modules +called by those modules, etc.). + +- In Terraform CLI, the root module is the working directory where Terraform is + invoked. (You can use command line options to specify a root module outside + the working directory, but in practice this is rare. ) +- In Terraform Cloud and Terraform Enterprise, the root module for a workspace + defaults to the top level of the configuration directory (supplied via version + control repository or direct upload), but the workspace settings can specify a + subdirectory to use instead. 
+ diff --git a/website/docs/configuration/override.html.md b/website/docs/language/files/override.html.md similarity index 96% rename from website/docs/configuration/override.html.md rename to website/docs/language/files/override.html.md index 10ab5467a..334aa2eb5 100644 --- a/website/docs/configuration/override.html.md +++ b/website/docs/language/files/override.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Override Files - Configuration Language" sidebar_current: "docs-config-override" description: |- @@ -9,10 +9,6 @@ description: |- # Override Files --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Override Files](../configuration-0-11/override.html). - Terraform normally loads all of the `.tf` and `.tf.json` files within a directory and expects each one to define a distinct set of configuration objects. If two files attempt to define the same object, Terraform returns diff --git a/website/docs/configuration/functions/abs.html.md b/website/docs/language/functions/abs.html.md similarity index 69% rename from website/docs/configuration/functions/abs.html.md rename to website/docs/language/functions/abs.html.md index 012bd00b4..51d3bc01c 100644 --- a/website/docs/configuration/functions/abs.html.md +++ b/website/docs/language/functions/abs.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "abs - Functions - Configuration Language" sidebar_current: "docs-funcs-numeric-abs" description: |- @@ -8,10 +8,6 @@ description: |- # `abs` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `abs` returns the absolute value of the given number. 
In other words, if the number is zero or positive then it is returned as-is, but if it is negative then it is multiplied by -1 to make it positive before returning it. diff --git a/website/docs/configuration/functions/abspath.html.md b/website/docs/language/functions/abspath.html.md similarity index 80% rename from website/docs/configuration/functions/abspath.html.md rename to website/docs/language/functions/abspath.html.md index cf5bceba1..3da10ed72 100644 --- a/website/docs/configuration/functions/abspath.html.md +++ b/website/docs/language/functions/abspath.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "abspath - Functions - Configuration Language" sidebar_current: "docs-funcs-file-abspath" description: |- @@ -8,10 +8,6 @@ description: |- # `abspath` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `abspath` takes a string containing a filesystem path and converts it to an absolute path. That is, if the path is not absolute, it will be joined with the current working directory. diff --git a/website/docs/configuration/functions/alltrue.html.md b/website/docs/language/functions/alltrue.html.md similarity index 72% rename from website/docs/configuration/functions/alltrue.html.md rename to website/docs/language/functions/alltrue.html.md index 173685c1f..c20dc3b31 100644 --- a/website/docs/configuration/functions/alltrue.html.md +++ b/website/docs/language/functions/alltrue.html.md @@ -1,5 +1,5 @@ --- -layout: functions +layout: "language" page_title: alltrue - Functions - Configuration Language sidebar_current: docs-funcs-collection-alltrue description: |- @@ -9,9 +9,7 @@ description: |- # `alltrue` Function --> **Note:** This page is about Terraform 0.12 and later. 
For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). +-> **Note:** This function is available in Terraform 0.14 and later. `alltrue` returns `true` if all elements in a given collection are `true` or `"true"`. It also returns `true` if the collection is empty. diff --git a/website/docs/configuration/functions/anytrue.html.md b/website/docs/language/functions/anytrue.html.md similarity index 73% rename from website/docs/configuration/functions/anytrue.html.md rename to website/docs/language/functions/anytrue.html.md index 0a6005a71..35e3c5ded 100644 --- a/website/docs/configuration/functions/anytrue.html.md +++ b/website/docs/language/functions/anytrue.html.md @@ -1,5 +1,5 @@ --- -layout: functions +layout: "language" page_title: anytrue - Functions - Configuration Language sidebar_current: docs-funcs-collection-anytrue description: |- @@ -9,9 +9,7 @@ description: |- # `anytrue` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). +-> **Note:** This function is available in Terraform 0.14 and later. `anytrue` returns `true` if any element in a given collection is `true` or `"true"`. It also returns `false` if the collection is empty. 
diff --git a/website/docs/configuration/functions/base64decode.html.md b/website/docs/language/functions/base64decode.html.md similarity index 89% rename from website/docs/configuration/functions/base64decode.html.md rename to website/docs/language/functions/base64decode.html.md index f81ed9122..41feab121 100644 --- a/website/docs/configuration/functions/base64decode.html.md +++ b/website/docs/language/functions/base64decode.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "base64decode - Functions - Configuration Language" sidebar_current: "docs-funcs-encoding-base64decode" description: |- @@ -8,10 +8,6 @@ description: |- # `base64decode` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `base64decode` takes a string containing a Base64 character sequence and returns the original string. diff --git a/website/docs/configuration/functions/base64encode.html.md b/website/docs/language/functions/base64encode.html.md similarity index 90% rename from website/docs/configuration/functions/base64encode.html.md rename to website/docs/language/functions/base64encode.html.md index e3e6bc5e4..8a4ddb1c3 100644 --- a/website/docs/configuration/functions/base64encode.html.md +++ b/website/docs/language/functions/base64encode.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "base64encode - Functions - Configuration Language" sidebar_current: "docs-funcs-encoding-base64encode" description: |- @@ -8,10 +8,6 @@ description: |- # `base64encode` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `base64encode` applies Base64 encoding to a string. 
Terraform uses the "standard" Base64 alphabet as defined in diff --git a/website/docs/configuration/functions/base64gzip.html.md b/website/docs/language/functions/base64gzip.html.md similarity index 85% rename from website/docs/configuration/functions/base64gzip.html.md rename to website/docs/language/functions/base64gzip.html.md index 5bd11434b..f8d103a9d 100644 --- a/website/docs/configuration/functions/base64gzip.html.md +++ b/website/docs/language/functions/base64gzip.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "base64gzip - Functions - Configuration Language" sidebar_current: "docs-funcs-encoding-base64gzip" description: |- @@ -9,10 +9,6 @@ description: |- # `base64gzip` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `base64gzip` compresses a string with gzip and then encodes the result in Base64 encoding. diff --git a/website/docs/configuration/functions/base64sha256.html.md b/website/docs/language/functions/base64sha256.html.md similarity index 84% rename from website/docs/configuration/functions/base64sha256.html.md rename to website/docs/language/functions/base64sha256.html.md index 381f410be..1edfc98da 100644 --- a/website/docs/configuration/functions/base64sha256.html.md +++ b/website/docs/language/functions/base64sha256.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "base64sha256 - Functions - Configuration Language" sidebar_current: "docs-funcs-crypto-base64sha256" description: |- @@ -9,10 +9,6 @@ description: |- # `base64sha256` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `base64sha256` computes the SHA256 hash of a given string and encodes it with Base64. 
This is not equivalent to `base64encode(sha256("test"))` since `sha256()` returns hexadecimal representation. diff --git a/website/docs/configuration/functions/base64sha512.html.md b/website/docs/language/functions/base64sha512.html.md similarity index 85% rename from website/docs/configuration/functions/base64sha512.html.md rename to website/docs/language/functions/base64sha512.html.md index b910d78f3..87bd52137 100644 --- a/website/docs/configuration/functions/base64sha512.html.md +++ b/website/docs/language/functions/base64sha512.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "base64sha512 - Functions - Configuration Language" sidebar_current: "docs-funcs-crypto-base64sha512" description: |- @@ -9,10 +9,6 @@ description: |- # `base64sha512` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `base64sha512` computes the SHA512 hash of a given string and encodes it with Base64. This is not equivalent to `base64encode(sha512("test"))` since `sha512()` returns hexadecimal representation. diff --git a/website/docs/configuration/functions/basename.html.md b/website/docs/language/functions/basename.html.md similarity index 86% rename from website/docs/configuration/functions/basename.html.md rename to website/docs/language/functions/basename.html.md index 8db5fa4bd..e7869d642 100644 --- a/website/docs/configuration/functions/basename.html.md +++ b/website/docs/language/functions/basename.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "basename - Functions - Configuration Language" sidebar_current: "docs-funcs-file-basename" description: |- @@ -9,10 +9,6 @@ description: |- # `basename` Function --> **Note:** This page is about Terraform 0.12 and later. 
For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `basename` takes a string containing a filesystem path and removes all except the last portion from it. diff --git a/website/docs/configuration/functions/bcrypt.html.md b/website/docs/language/functions/bcrypt.html.md similarity index 84% rename from website/docs/configuration/functions/bcrypt.html.md rename to website/docs/language/functions/bcrypt.html.md index 668523439..b1b0eb79a 100644 --- a/website/docs/configuration/functions/bcrypt.html.md +++ b/website/docs/language/functions/bcrypt.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "bcrypt - Functions - Configuration Language" sidebar_current: "docs-funcs-crypto-bcrypt" description: |- @@ -9,10 +9,6 @@ description: |- # `bcrypt` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `bcrypt` computes a hash of the given string using the Blowfish cipher, returning a string in [the _Modular Crypt Format_](https://passlib.readthedocs.io/en/stable/modular_crypt_format.html) diff --git a/website/docs/configuration/functions/can.html.md b/website/docs/language/functions/can.html.md similarity index 88% rename from website/docs/configuration/functions/can.html.md rename to website/docs/language/functions/can.html.md index 5cb3abb61..8c05a304e 100644 --- a/website/docs/configuration/functions/can.html.md +++ b/website/docs/language/functions/can.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "can - Functions - Configuration Language" sidebar_current: "docs-funcs-conversion-can" description: |- @@ -9,10 +9,6 @@ description: |- # `can` Function --> **Note:** This page is about Terraform 0.12 and later. 
For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `can` evaluates the given expression and returns a boolean value indicating whether the expression produced a result without any errors. @@ -23,7 +19,7 @@ fallback values for failing expressions. The primary purpose of `can` is to turn an error condition into a boolean validation result when writing -[custom variable validation rules](../variables.html#custom-validation-rules). +[custom variable validation rules](/docs/language/values/variables.html#custom-validation-rules). For example: ``` diff --git a/website/docs/configuration/functions/ceil.html.md b/website/docs/language/functions/ceil.html.md similarity index 70% rename from website/docs/configuration/functions/ceil.html.md rename to website/docs/language/functions/ceil.html.md index deb4dada6..0b68172ce 100644 --- a/website/docs/configuration/functions/ceil.html.md +++ b/website/docs/language/functions/ceil.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "ceil - Functions - Configuration Language" sidebar_current: "docs-funcs-numeric-ceil" description: |- @@ -9,10 +9,6 @@ description: |- # `ceil` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `ceil` returns the closest whole number that is greater than or equal to the given value, which may be a fraction. 
diff --git a/website/docs/configuration/functions/chomp.html.md b/website/docs/language/functions/chomp.html.md similarity index 74% rename from website/docs/configuration/functions/chomp.html.md rename to website/docs/language/functions/chomp.html.md index 1e8db54a8..5ea67e81a 100644 --- a/website/docs/configuration/functions/chomp.html.md +++ b/website/docs/language/functions/chomp.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "chomp - Functions - Configuration Language" sidebar_current: "docs-funcs-string-chomp" description: |- @@ -8,10 +8,6 @@ description: |- # `chomp` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `chomp` removes newline characters at the end of a string. This can be useful if, for example, the string was read from a file that has diff --git a/website/docs/configuration/functions/chunklist.html.md b/website/docs/language/functions/chunklist.html.md similarity index 75% rename from website/docs/configuration/functions/chunklist.html.md rename to website/docs/language/functions/chunklist.html.md index 63df0bc66..21823ab47 100644 --- a/website/docs/configuration/functions/chunklist.html.md +++ b/website/docs/language/functions/chunklist.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "chunklist - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-chunklist" description: |- @@ -9,10 +9,6 @@ description: |- # `chunklist` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `chunklist` splits a single list into fixed-size chunks, returning a list of lists. 
diff --git a/website/docs/configuration/functions/cidrhost.html.md b/website/docs/language/functions/cidrhost.html.md similarity index 88% rename from website/docs/configuration/functions/cidrhost.html.md rename to website/docs/language/functions/cidrhost.html.md index a75793120..e43540362 100644 --- a/website/docs/configuration/functions/cidrhost.html.md +++ b/website/docs/language/functions/cidrhost.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "cidrhost - Functions - Configuration Language" sidebar_current: "docs-funcs-ipnet-cidrhost" description: |- @@ -9,10 +9,6 @@ description: |- # `cidrhost` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `cidrhost` calculates a full host IP address for a given host number within a given IP network address prefix. diff --git a/website/docs/configuration/functions/cidrnetmask.html.md b/website/docs/language/functions/cidrnetmask.html.md similarity index 80% rename from website/docs/configuration/functions/cidrnetmask.html.md rename to website/docs/language/functions/cidrnetmask.html.md index ce3fa8adc..bb3de4f7b 100644 --- a/website/docs/configuration/functions/cidrnetmask.html.md +++ b/website/docs/language/functions/cidrnetmask.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "cidrnetmask - Functions - Configuration Language" sidebar_current: "docs-funcs-ipnet-cidrnetmask" description: |- @@ -9,10 +9,6 @@ description: |- # `cidrnetmask` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `cidrnetmask` converts an IPv4 address prefix given in CIDR notation into a subnet mask address. 
diff --git a/website/docs/configuration/functions/cidrsubnet.html.md b/website/docs/language/functions/cidrsubnet.html.md similarity index 96% rename from website/docs/configuration/functions/cidrsubnet.html.md rename to website/docs/language/functions/cidrsubnet.html.md index 6988ce3f6..e64c894fb 100644 --- a/website/docs/configuration/functions/cidrsubnet.html.md +++ b/website/docs/language/functions/cidrsubnet.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "cidrsubnet - Functions - Configuration Language" sidebar_current: "docs-funcs-ipnet-cidrsubnet" description: |- @@ -9,10 +9,6 @@ description: |- # `cidrsubnet` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `cidrsubnet` calculates a subnet address within given IP network address prefix. ```hcl diff --git a/website/docs/configuration/functions/cidrsubnets.html.md b/website/docs/language/functions/cidrsubnets.html.md similarity index 91% rename from website/docs/configuration/functions/cidrsubnets.html.md rename to website/docs/language/functions/cidrsubnets.html.md index 7308cf5de..4f43635c7 100644 --- a/website/docs/configuration/functions/cidrsubnets.html.md +++ b/website/docs/language/functions/cidrsubnets.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "cidrsubnets - Functions - Configuration Language" sidebar_current: "docs-funcs-ipnet-cidrsubnets" description: |- @@ -9,10 +9,6 @@ description: |- # `cidrsubnets` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `cidrsubnets` calculates a sequence of consecutive IP address ranges within a particular CIDR prefix. @@ -70,7 +66,7 @@ platforms. 
``` You can use nested `cidrsubnets` calls with -[`for` expressions](/docs/configuration/expressions.html#for-expressions) +[`for` expressions](/docs/language/expressions/for.html) to concisely allocate groups of network address blocks: ``` diff --git a/website/docs/configuration/functions/coalesce.html.md b/website/docs/language/functions/coalesce.html.md similarity index 52% rename from website/docs/configuration/functions/coalesce.html.md rename to website/docs/language/functions/coalesce.html.md index e38a0971f..6458970ee 100644 --- a/website/docs/configuration/functions/coalesce.html.md +++ b/website/docs/language/functions/coalesce.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "coalesce - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-coalesce-x" description: |- @@ -9,13 +9,14 @@ description: |- # `coalesce` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `coalesce` takes any number of arguments and returns the first one that isn't null or an empty string. +All of the arguments must be of the same type. Terraform will try to +convert mismatched arguments to the most general of the types that all +arguments can convert to, or return an error if the types are incompatible. +The result type is the same as the type of all of the arguments. + ## Examples ``` @@ -35,6 +36,22 @@ symbol to expand the list as arguments: b ``` +Terraform attempts to select a result type that all of the arguments can +convert to, so mixing argument types may produce surprising results due to +Terraform's automatic type conversion rules: + +``` +> coalesce(1, "hello") +"1" +> coalesce(true, "hello") +"true" +> coalesce({}, "hello") + +Error: Error in function call + +Call to function "coalesce" failed: all arguments must have the same type. 
+``` + ## Related Functions * [`coalescelist`](./coalescelist.html) performs a similar operation with diff --git a/website/docs/configuration/functions/coalescelist.html.md b/website/docs/language/functions/coalescelist.html.md similarity index 79% rename from website/docs/configuration/functions/coalescelist.html.md rename to website/docs/language/functions/coalescelist.html.md index aa265aa7d..6508fa31f 100644 --- a/website/docs/configuration/functions/coalescelist.html.md +++ b/website/docs/language/functions/coalescelist.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "coalescelist - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-coalescelist" description: |- @@ -9,10 +9,6 @@ description: |- # `coalescelist` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `coalescelist` takes any number of list arguments and returns the first one that isn't empty. diff --git a/website/docs/configuration/functions/compact.html.md b/website/docs/language/functions/compact.html.md similarity index 65% rename from website/docs/configuration/functions/compact.html.md rename to website/docs/language/functions/compact.html.md index 9659efa54..cd2d6379c 100644 --- a/website/docs/configuration/functions/compact.html.md +++ b/website/docs/language/functions/compact.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "compact - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-compact" description: |- @@ -8,10 +8,6 @@ description: |- # `compact` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). 
- `compact` takes a list of strings and returns a new list with any empty string elements removed. diff --git a/website/docs/configuration/functions/concat.html.md b/website/docs/language/functions/concat.html.md similarity index 63% rename from website/docs/configuration/functions/concat.html.md rename to website/docs/language/functions/concat.html.md index 032ec8c8b..47ef10ac0 100644 --- a/website/docs/configuration/functions/concat.html.md +++ b/website/docs/language/functions/concat.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "concat - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-concat" description: |- @@ -8,10 +8,6 @@ description: |- # `concat` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `concat` takes two or more lists and combines them into a single list. ## Examples diff --git a/website/docs/configuration/functions/contains.html.md b/website/docs/language/functions/contains.html.md similarity index 69% rename from website/docs/configuration/functions/contains.html.md rename to website/docs/language/functions/contains.html.md index ff21003f6..8ab835eb4 100644 --- a/website/docs/configuration/functions/contains.html.md +++ b/website/docs/language/functions/contains.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "contains - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-contains" description: |- @@ -8,10 +8,6 @@ description: |- # `contains` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `contains` determines whether a given list or set contains a given single value as one of its elements. 
diff --git a/website/docs/configuration/functions/csvdecode.html.md b/website/docs/language/functions/csvdecode.html.md similarity index 87% rename from website/docs/configuration/functions/csvdecode.html.md rename to website/docs/language/functions/csvdecode.html.md index 508737443..be3016286 100644 --- a/website/docs/configuration/functions/csvdecode.html.md +++ b/website/docs/language/functions/csvdecode.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "csvdecode - Functions - Configuration Language" sidebar_current: "docs-funcs-encoding-csvdecode" description: |- @@ -8,10 +8,6 @@ description: |- # `csvdecode` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `csvdecode` decodes a string containing CSV-formatted data and produces a list of maps representing that data. @@ -46,7 +42,7 @@ number of fields, or this function will produce an error. ## Use with the `for_each` meta-argument You can use the result of `csvdecode` with -[the `for_each` meta-argument](/docs/configuration/resources.html#for_each-multiple-resource-instances-defined-by-a-map-or-set-of-strings) +[the `for_each` meta-argument](/docs/language/meta-arguments/for_each.html) to describe a collection of similar objects whose differences are described by the rows in the given CSV file. @@ -94,7 +90,7 @@ create or destroy associated instances as appropriate. If there is no reasonable value you can use as a unique identifier in your CSV then you could instead use -[the `count` meta-argument](/docs/configuration/resources.html#count-multiple-resource-instances-by-count) +[the `count` meta-argument](/docs/language/meta-arguments/count.html) to define an object for each CSV row, with each one identified by its index into the list returned by `csvdecode`. 
However, in that case any future updates to the CSV may be disruptive if they change the positions of particular objects in diff --git a/website/docs/language/functions/defaults.html.md b/website/docs/language/functions/defaults.html.md new file mode 100644 index 000000000..b4d684780 --- /dev/null +++ b/website/docs/language/functions/defaults.html.md @@ -0,0 +1,201 @@ +--- +layout: "language" +page_title: "defaults - Functions - Configuration Language" +sidebar_current: "docs-funcs-conversion-defaults" +description: |- + The defaults function can fill in default values in place of null values. +--- + +# `defaults` Function + +-> **Note:** This function is available only in Terraform 0.15 and later. + +~> **Experimental:** This function is part of +[the optional attributes experiment](/docs/language/expressions/type-constraints.html#experimental-optional-object-type-attributes) +and is only available in modules where the `module_variable_optional_attrs` +experiment is explicitly enabled. + +The `defaults` function is a specialized function intended for use with +input variables whose type constraints are object types or collections of +object types that include optional attributes. + +When you define an attribute as optional and the caller doesn't provide an +explicit value for it, Terraform will set the attribute to `null` to represent +that it was omitted. If you want to use a placeholder value other than `null` +when an attribute isn't set, you can use the `defaults` function to concisely +assign default values only where an attribute value was set to `null`. + +``` +defaults(input_value, defaults) +``` + +The `defaults` function expects that the `input_value` argument will be the +value of an input variable with an exact [type constraint](/docs/language/expressions/types.html) +(not containing `any`). 
The function will then visit every attribute in +the data structure, including attributes of nested objects, and apply the +default values given in the defaults object. + +The interpretation of attributes in the `defaults` argument depends on what +type an attribute has in the `input_value`: + +* **Primitive types** (`string`, `number`, `bool`): if a default value is given + then it will be used only if the `input_value`'s attribute of the same + name has the value `null`. The default value's type must match the input + value's type. +* **Structural types** (`object` and `tuple` types): Terraform will recursively + visit all of the attributes or elements of the nested value and repeat the + same defaults-merging logic one level deeper. The default value's type must + be of the same kind as the input value's type, and a default value for an + object type must only contain attribute names that appear in the input + value's type. +* **Collection types** (`list`, `map`, and `set` types): Terraform will visit + each of the collection elements in turn and apply defaults to them. In this + case the default value is only a single value to be applied to _all_ elements + of the collection, so it must have a type compatible with the collection's + element type rather than with the collection type itself. + +The above rules may be easier to follow with an example. Consider the following +Terraform configuration: + +```hcl +terraform { + # Optional attributes and the defaults function are + # both experimental, so we must opt in to the experiment. 
+ experiments = [module_variable_optional_attrs] +} + +variable "storage" { + type = object({ + name = string + enabled = optional(bool) + website = object({ + index_document = optional(string) + error_document = optional(string) + }) + documents = map( + object({ + source_file = string + content_type = optional(string) + }) + ) + }) +} + +locals { + storage = defaults(var.storage, { + # If "enabled" isn't set then it will default + # to true. + enabled = true + + # The "website" attribute is required, but + # it's here to provide defaults for the + # optional attributes inside. + website = { + index_document = "index.html" + error_document = "error.html" + } + + # The "documents" attribute has a map type, + # so the default value represents defaults + # to be applied to all of the elements in + # the map, not for the map itself. Therefore + # it's a single object matching the map + # element type, not a map itself. + documents = { + # If _any_ of the map elements omit + # content_type then this default will be + # used instead. + content_type = "application/octet-stream" + } + }) +} + +output "storage" { + value = local.storage +} +``` + +To test this out, we can create a file `terraform.tfvars` to provide an example +value for `var.storage`: + +```hcl +storage = { + name = "example" + + website = { + error_document = "error.txt" + } + documents = { + "index.html" = { + source_file = "index.html.tmpl" + content_type = "text/html" + } + "error.txt" = { + source_file = "error.txt.tmpl" + content_type = "text/plain" + } + "terraform.exe" = { + source_file = "terraform.exe" + } + } +} +``` + +The above value conforms to the variable's type constraint because it only +omits attributes that are declared as optional. Terraform will automatically +populate those attributes with the value `null` before evaluating anything +else, and then the `defaults` function in `local.storage` will substitute +default values for each of them. 
+ +The result of this `defaults` call would therefore be the following object: + +``` +storage = { + "documents" = tomap({ + "error.txt" = { + "content_type" = "text/plain" + "source_file" = "error.txt.tmpl" + } + "index.html" = { + "content_type" = "text/html" + "source_file" = "index.html.tmpl" + } + "terraform.exe" = { + "content_type" = "application/octet-stream" + "source_file" = "terraform.exe" + } + }) + "enabled" = true + "name" = "example" + "website" = { + "error_document" = "error.txt" + "index_document" = "index.html" + } +} +``` + +Notice that `enabled` and `website.index_document` were both populated directly +from the defaults. Notice also that the `"terraform.exe"` element of +`documents` had its `content_type` attribute populated from the `documents` +default, but the default value didn't need to predict that there would be an +element key `"terraform.exe"` because the default values apply equally to +all elements of the map where the optional attributes are `null`. + +## Using `defaults` elsewhere + +The design of the `defaults` function depends on input values having +well-specified type constraints, so it can reliably recognize the difference +between similar types: maps vs. objects, lists vs. tuples. The type constraint +causes Terraform to convert the caller's value to conform to the constraint +and thus `defaults` can rely on the input to conform. + +Elsewhere in the Terraform language it's typical to be less precise about +types, for example using the object construction syntax `{ ... }` to construct +values that will be used as if they are maps. Because `defaults` uses the +type information of `input_value`, an `input_value` that _doesn't_ originate +in an input variable will tend not to have an appropriate value type and will +thus not be interpreted as expected by `defaults`. 
+ +We recommend using `defaults` only with fully-constrained input variable values +in the first argument, so you can use the variable's type constraint to +explicitly distinguish between collection and structural types. diff --git a/website/docs/configuration/functions/dirname.html.md b/website/docs/language/functions/dirname.html.md similarity index 87% rename from website/docs/configuration/functions/dirname.html.md rename to website/docs/language/functions/dirname.html.md index ddf07d91a..e39150b37 100644 --- a/website/docs/configuration/functions/dirname.html.md +++ b/website/docs/language/functions/dirname.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "dirname - Functions - Configuration Language" sidebar_current: "docs-funcs-file-dirname" description: |- @@ -8,10 +8,6 @@ description: |- # `dirname` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `dirname` takes a string containing a filesystem path and removes the last portion from it. diff --git a/website/docs/configuration/functions/distinct.html.md b/website/docs/language/functions/distinct.html.md similarity index 70% rename from website/docs/configuration/functions/distinct.html.md rename to website/docs/language/functions/distinct.html.md index a28be11da..fc871471e 100644 --- a/website/docs/configuration/functions/distinct.html.md +++ b/website/docs/language/functions/distinct.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "distinct - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-distinct" description: |- @@ -8,10 +8,6 @@ description: |- # `distinct` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). 
- `distinct` takes a list and returns a new list with any duplicate elements removed. diff --git a/website/docs/configuration/functions/element.html.md b/website/docs/language/functions/element.html.md similarity index 75% rename from website/docs/configuration/functions/element.html.md rename to website/docs/language/functions/element.html.md index c4f85d9e4..f0f6462c6 100644 --- a/website/docs/configuration/functions/element.html.md +++ b/website/docs/language/functions/element.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "element - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-element" description: |- @@ -8,10 +8,6 @@ description: |- # `element` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `element` retrieves a single element from a list. ```hcl @@ -19,7 +15,7 @@ element(list, index) ``` The index is zero-based. This function produces an error if used with an -empty list. +empty list. The index must be a non-negative integer. Use the built-in index syntax `list[index]` in most cases. Use this function only for the special additional "wrap-around" behavior described below. @@ -39,6 +35,15 @@ If the given index is greater than the length of the list then the index is a ``` +To get the last element from the list use [`length`](./length.html) to find +the size of the list (minus 1 as the list is zero-based) and then pick the +last element: + +``` +> element(["a", "b", "c"], length(["a", "b", "c"])-1) +c +``` + ## Related Functions * [`index`](./index.html) finds the index for a particular element value. 
diff --git a/website/docs/configuration/functions/file.html.md b/website/docs/language/functions/file.html.md similarity index 84% rename from website/docs/configuration/functions/file.html.md rename to website/docs/language/functions/file.html.md index e34ada9f8..48e96afa2 100644 --- a/website/docs/configuration/functions/file.html.md +++ b/website/docs/language/functions/file.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "file - Functions - Configuration Language" sidebar_current: "docs-funcs-file-file-x" description: |- @@ -9,10 +9,6 @@ description: |- # `file` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `file` reads the contents of a file at the given path and returns them as a string. @@ -31,7 +27,7 @@ dependency graph, so this function cannot be used with files that are generated dynamically during a Terraform operation. We do not recommend using dynamic local files in Terraform configurations, but in rare situations where this is necessary you can use -[the `local_file` data source](/docs/providers/local/d/file.html) +[the `local_file` data source](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) to read files while respecting resource dependencies. 
## Examples diff --git a/website/docs/configuration/functions/filebase64.html.md b/website/docs/language/functions/filebase64.html.md similarity index 89% rename from website/docs/configuration/functions/filebase64.html.md rename to website/docs/language/functions/filebase64.html.md index 770840517..1ae07b4f3 100644 --- a/website/docs/configuration/functions/filebase64.html.md +++ b/website/docs/language/functions/filebase64.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "filebase64 - Functions - Configuration Language" sidebar_current: "docs-funcs-file-filebase64" description: |- @@ -9,10 +9,6 @@ description: |- # `filebase64` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `filebase64` reads the contents of a file at the given path and returns them as a base64-encoded string. diff --git a/website/docs/configuration/functions/filebase64sha256.html.md b/website/docs/language/functions/filebase64sha256.html.md similarity index 73% rename from website/docs/configuration/functions/filebase64sha256.html.md rename to website/docs/language/functions/filebase64sha256.html.md index f9e295940..cedc5d3eb 100644 --- a/website/docs/configuration/functions/filebase64sha256.html.md +++ b/website/docs/language/functions/filebase64sha256.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "filebase64sha256 - Functions - Configuration Language" sidebar_current: "docs-funcs-crypto-filebase64sha256" description: |- @@ -9,10 +9,6 @@ description: |- # `filebase64sha256` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). 
- `filebase64sha256` is a variant of [`base64sha256`](./base64sha256.html) that hashes the contents of a given file rather than a literal string. diff --git a/website/docs/configuration/functions/filebase64sha512.html.md b/website/docs/language/functions/filebase64sha512.html.md similarity index 73% rename from website/docs/configuration/functions/filebase64sha512.html.md rename to website/docs/language/functions/filebase64sha512.html.md index 77de9face..6844050e0 100644 --- a/website/docs/configuration/functions/filebase64sha512.html.md +++ b/website/docs/language/functions/filebase64sha512.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "filebase64sha512 - Functions - Configuration Language" sidebar_current: "docs-funcs-crypto-filebase64sha512" description: |- @@ -9,10 +9,6 @@ description: |- # `filebase64sha512` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `filebase64sha512` is a variant of [`base64sha512`](./base64sha512.html) that hashes the contents of a given file rather than a literal string. diff --git a/website/docs/configuration/functions/fileexists.html.md b/website/docs/language/functions/fileexists.html.md similarity index 80% rename from website/docs/configuration/functions/fileexists.html.md rename to website/docs/language/functions/fileexists.html.md index 0e0760886..019b8e61d 100644 --- a/website/docs/configuration/functions/fileexists.html.md +++ b/website/docs/language/functions/fileexists.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "fileexists - Functions - Configuration Language" sidebar_current: "docs-funcs-file-file-exists" description: |- @@ -8,10 +8,6 @@ description: |- # `fileexists` Function --> **Note:** This page is about Terraform 0.12 and later. 
For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `fileexists` determines whether a file exists at a given path. ```hcl diff --git a/website/docs/configuration/functions/filemd5.html.md b/website/docs/language/functions/filemd5.html.md similarity index 71% rename from website/docs/configuration/functions/filemd5.html.md rename to website/docs/language/functions/filemd5.html.md index e9c41c77e..956581767 100644 --- a/website/docs/configuration/functions/filemd5.html.md +++ b/website/docs/language/functions/filemd5.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "filemd5 - Functions - Configuration Language" sidebar_current: "docs-funcs-crypto-filemd5" description: |- @@ -9,10 +9,6 @@ description: |- # `filemd5` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `filemd5` is a variant of [`md5`](./md5.html) that hashes the contents of a given file rather than a literal string. diff --git a/website/docs/configuration/functions/fileset.html.md b/website/docs/language/functions/fileset.html.md similarity index 85% rename from website/docs/configuration/functions/fileset.html.md rename to website/docs/language/functions/fileset.html.md index c68234dee..147ac08ba 100644 --- a/website/docs/configuration/functions/fileset.html.md +++ b/website/docs/language/functions/fileset.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "fileset - Functions - Configuration Language" sidebar_current: "docs-funcs-file-file-set" description: |- @@ -8,10 +8,6 @@ description: |- # `fileset` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). 
- `fileset` enumerates a set of regular file names given a path and pattern. The path is automatically removed from the resulting set of file names and any result still containing path separators always returns forward slash (`/`) as @@ -69,7 +65,7 @@ before Terraform takes any actions. ``` A common use of `fileset` is to create one resource instance per matched file, using -[the `for_each` meta-argument](/docs/configuration/resources.html#for_each-multiple-resource-instances-defined-by-a-map-or-set-of-strings): +[the `for_each` meta-argument](/docs/language/meta-arguments/for_each.html): ```hcl resource "example_thing" "example" { diff --git a/website/docs/configuration/functions/filesha1.html.md b/website/docs/language/functions/filesha1.html.md similarity index 71% rename from website/docs/configuration/functions/filesha1.html.md rename to website/docs/language/functions/filesha1.html.md index fbe9d6208..a1657638e 100644 --- a/website/docs/configuration/functions/filesha1.html.md +++ b/website/docs/language/functions/filesha1.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "filesha1 - Functions - Configuration Language" sidebar_current: "docs-funcs-crypto-filesha1" description: |- @@ -9,10 +9,6 @@ description: |- # `filesha1` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `filesha1` is a variant of [`sha1`](./sha1.html) that hashes the contents of a given file rather than a literal string. 
diff --git a/website/docs/configuration/functions/filesha256.html.md b/website/docs/language/functions/filesha256.html.md similarity index 72% rename from website/docs/configuration/functions/filesha256.html.md rename to website/docs/language/functions/filesha256.html.md index ac8b20556..2392b57fa 100644 --- a/website/docs/configuration/functions/filesha256.html.md +++ b/website/docs/language/functions/filesha256.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "filesha256 - Functions - Configuration Language" sidebar_current: "docs-funcs-crypto-filesha256" description: |- @@ -9,10 +9,6 @@ description: |- # `filesha256` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `filesha256` is a variant of [`sha256`](./sha256.html) that hashes the contents of a given file rather than a literal string. diff --git a/website/docs/configuration/functions/filesha512.html.md b/website/docs/language/functions/filesha512.html.md similarity index 72% rename from website/docs/configuration/functions/filesha512.html.md rename to website/docs/language/functions/filesha512.html.md index 6bf401955..9786df3fc 100644 --- a/website/docs/configuration/functions/filesha512.html.md +++ b/website/docs/language/functions/filesha512.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "filesha512 - Functions - Configuration Language" sidebar_current: "docs-funcs-crypto-filesha512" description: |- @@ -9,10 +9,6 @@ description: |- # `filesha512` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `filesha512` is a variant of [`sha512`](./sha512.html) that hashes the contents of a given file rather than a literal string. 
diff --git a/website/docs/configuration/functions/flatten.html.md b/website/docs/language/functions/flatten.html.md similarity index 88% rename from website/docs/configuration/functions/flatten.html.md rename to website/docs/language/functions/flatten.html.md index 57211c415..57c81091a 100644 --- a/website/docs/configuration/functions/flatten.html.md +++ b/website/docs/language/functions/flatten.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "flatten - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-flatten" description: |- @@ -8,10 +8,6 @@ description: |- # `flatten` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `flatten` takes a list and replaces any elements that are lists with a flattened sequence of the list contents. @@ -35,9 +31,9 @@ Indirectly-nested lists, such as those in maps, are _not_ flattened. ## Flattening nested structures for `for_each` The -[resource `for_each`](/docs/configuration/resources.html#for_each-multiple-resource-instances-defined-by-a-map-or-set-of-strings) +[resource `for_each`](/docs/language/meta-arguments/for_each.html) and -[`dynamic` block](/docs/configuration/expressions.html#dynamic-blocks) +[`dynamic` block](/docs/language/expressions/dynamic-blocks.html) language features both require a collection value that has one element for each repetition. 
diff --git a/website/docs/configuration/functions/floor.html.md b/website/docs/language/functions/floor.html.md similarity index 70% rename from website/docs/configuration/functions/floor.html.md rename to website/docs/language/functions/floor.html.md index eb0ad9245..6a1ea1804 100644 --- a/website/docs/configuration/functions/floor.html.md +++ b/website/docs/language/functions/floor.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "floor - Functions - Configuration Language" sidebar_current: "docs-funcs-numeric-floor" description: |- @@ -9,10 +9,6 @@ description: |- # `floor` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `floor` returns the closest whole number that is less than or equal to the given value, which may be a fraction. diff --git a/website/docs/configuration/functions/format.html.md b/website/docs/language/functions/format.html.md similarity index 96% rename from website/docs/configuration/functions/format.html.md rename to website/docs/language/functions/format.html.md index 5d295d22e..7fb8c9a8a 100644 --- a/website/docs/configuration/functions/format.html.md +++ b/website/docs/language/functions/format.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "format - Functions - Configuration Language" sidebar_current: "docs-funcs-string-format-x" description: |- @@ -9,10 +9,6 @@ description: |- # `format` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `format` produces a string by formatting a number of other values according to a specification string. It is similar to the `printf` function in C, and other similar functions in other programming languages. 
diff --git a/website/docs/configuration/functions/formatdate.html.md b/website/docs/language/functions/formatdate.html.md similarity index 96% rename from website/docs/configuration/functions/formatdate.html.md rename to website/docs/language/functions/formatdate.html.md index ee31f3917..c519b6eff 100644 --- a/website/docs/configuration/functions/formatdate.html.md +++ b/website/docs/language/functions/formatdate.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "formatdate - Functions - Configuration Language" sidebar_current: "docs-funcs-datetime-formatdate" description: |- @@ -8,10 +8,6 @@ description: |- # `formatdate` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `formatdate` converts a timestamp into a different time format. ```hcl diff --git a/website/docs/configuration/functions/formatlist.html.md b/website/docs/language/functions/formatlist.html.md similarity index 86% rename from website/docs/configuration/functions/formatlist.html.md rename to website/docs/language/functions/formatlist.html.md index 8beae17d2..16043611a 100644 --- a/website/docs/configuration/functions/formatlist.html.md +++ b/website/docs/language/functions/formatlist.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "formatlist - Functions - Configuration Language" sidebar_current: "docs-funcs-string-formatlist" description: |- @@ -9,10 +9,6 @@ description: |- # `formatlist` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `formatlist` produces a list of strings by formatting a number of other values according to a specification string. 
diff --git a/website/docs/configuration/functions/indent.html.md b/website/docs/language/functions/indent.html.md similarity index 78% rename from website/docs/configuration/functions/indent.html.md rename to website/docs/language/functions/indent.html.md index 54067213d..40c995075 100644 --- a/website/docs/configuration/functions/indent.html.md +++ b/website/docs/language/functions/indent.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "indent - Functions - Configuration Language" sidebar_current: "docs-funcs-string-indent" description: |- @@ -9,10 +9,6 @@ description: |- # `indent` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `indent` adds a given number of spaces to the beginnings of all but the first line in a given multi-line string. diff --git a/website/docs/configuration/functions.html.md b/website/docs/language/functions/index.html.md similarity index 71% rename from website/docs/configuration/functions.html.md rename to website/docs/language/functions/index.html.md index 2b866e2fc..f062b32a7 100644 --- a/website/docs/configuration/functions.html.md +++ b/website/docs/language/functions/index.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "Functions - Configuration Language" sidebar_current: "docs-config-functions" description: |- @@ -9,9 +9,7 @@ description: |- # Built-in Functions --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../configuration-0-11/interpolation.html). 
+> **Hands-on:** Try the [Perform Dynamic Operations with Functions](https://learn.hashicorp.com/tutorials/terraform/functions?in=terraform/configuration-language&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. The Terraform language includes a number of built-in functions that you can call from within expressions to transform and combine values. The general @@ -23,8 +21,8 @@ max(5, 12, 9) ``` For more details on syntax, see -[_Function Calls_](./expressions.html#function-calls) -on the Expressions page. +[_Function Calls_](/docs/language/expressions/function-calls.html) +in the Expressions section. The Terraform language does not support user-defined functions, and so only the functions built in to the language are available for use. The navigation @@ -32,7 +30,7 @@ for this section includes a list of all of the available built-in functions. You can experiment with the behavior of Terraform's built-in functions from the Terraform expression console, by running -[the `terraform console` command](/docs/commands/console.html): +[the `terraform console` command](/docs/cli/commands/console.html): ``` > max(5, 12, 9) diff --git a/website/docs/configuration/functions/index.html.md b/website/docs/language/functions/index_function.html.md similarity index 73% rename from website/docs/configuration/functions/index.html.md rename to website/docs/language/functions/index_function.html.md index 59575c91c..6d8b3eb13 100644 --- a/website/docs/configuration/functions/index.html.md +++ b/website/docs/language/functions/index_function.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "index - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-index" description: |- @@ -8,10 +8,6 @@ description: |- # `index` Function --> **Note:** This page is about Terraform 0.12 and later. 
For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `index` finds the element index for a given value in a list. ```hcl diff --git a/website/docs/configuration/functions/join.html.md b/website/docs/language/functions/join.html.md similarity index 75% rename from website/docs/configuration/functions/join.html.md rename to website/docs/language/functions/join.html.md index a2fc637f4..e3c609fb0 100644 --- a/website/docs/configuration/functions/join.html.md +++ b/website/docs/language/functions/join.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "join - Functions - Configuration Language" sidebar_current: "docs-funcs-string-join" description: |- @@ -9,10 +9,6 @@ description: |- # `join` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `join` produces a string by concatenating together all elements of a given list of strings with the given delimiter. diff --git a/website/docs/configuration/functions/jsondecode.html.md b/website/docs/language/functions/jsondecode.html.md similarity index 84% rename from website/docs/configuration/functions/jsondecode.html.md rename to website/docs/language/functions/jsondecode.html.md index 6e569bb3d..aacf2874a 100644 --- a/website/docs/configuration/functions/jsondecode.html.md +++ b/website/docs/language/functions/jsondecode.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "jsondecode - Functions - Configuration Language" sidebar_current: "docs-funcs-encoding-jsondecode" description: |- @@ -9,17 +9,13 @@ description: |- # `jsondecode` Function --> **Note:** This page is about Terraform 0.12 and later. 
For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `jsondecode` interprets a given string as JSON, returning a representation of the result of decoding that string. The JSON encoding is defined in [RFC 7159](https://tools.ietf.org/html/rfc7159). This function maps JSON values to -[Terraform language values](../expressions.html#types-and-values) +[Terraform language values](/docs/language/expressions/types.html) in the following way: | JSON type | Terraform type | diff --git a/website/docs/configuration/functions/jsonencode.html.md b/website/docs/language/functions/jsonencode.html.md similarity index 84% rename from website/docs/configuration/functions/jsonencode.html.md rename to website/docs/language/functions/jsonencode.html.md index 6f9376eab..06f13fa84 100644 --- a/website/docs/configuration/functions/jsonencode.html.md +++ b/website/docs/language/functions/jsonencode.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "jsonencode - Functions - Configuration Language" sidebar_current: "docs-funcs-encoding-jsonencode" description: |- @@ -8,16 +8,12 @@ description: |- # `jsonencode` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `jsonencode` encodes a given value to a string using JSON syntax. The JSON encoding is defined in [RFC 7159](https://tools.ietf.org/html/rfc7159). 
This function maps -[Terraform language values](../expressions.html#types-and-values) +[Terraform language values](/docs/language/expressions/types.html) to JSON values in the following way: | Terraform type | JSON type | diff --git a/website/docs/configuration/functions/keys.html.md b/website/docs/language/functions/keys.html.md similarity index 73% rename from website/docs/configuration/functions/keys.html.md rename to website/docs/language/functions/keys.html.md index 87097a1de..46a65e48d 100644 --- a/website/docs/configuration/functions/keys.html.md +++ b/website/docs/language/functions/keys.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "keys - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-keys" description: |- @@ -8,10 +8,6 @@ description: |- # `keys` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `keys` takes a map and returns a list containing the keys from that map. The keys are returned in lexicographical order, ensuring that the result will diff --git a/website/docs/configuration/functions/length.html.md b/website/docs/language/functions/length.html.md similarity index 84% rename from website/docs/configuration/functions/length.html.md rename to website/docs/language/functions/length.html.md index b4cde65cd..5fd70ddb7 100644 --- a/website/docs/configuration/functions/length.html.md +++ b/website/docs/language/functions/length.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "length - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-length" description: |- @@ -8,10 +8,6 @@ description: |- # `length` Function --> **Note:** This page is about Terraform 0.12 and later. 
For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `length` determines the length of a given list, map, or string. If given a list or map, the result is the number of elements in that collection. diff --git a/website/docs/language/functions/list.html.md b/website/docs/language/functions/list.html.md new file mode 100644 index 000000000..3330d330c --- /dev/null +++ b/website/docs/language/functions/list.html.md @@ -0,0 +1,29 @@ +--- +layout: "language" +page_title: "list - Functions - Configuration Language" +sidebar_current: "docs-funcs-collection-list" +description: |- + The list function constructs a list from some given elements. +--- + +# `list` Function + +The `list` function is no longer available. Prior to Terraform v0.12 it was +the only available syntax for writing a literal list inside an expression, +but Terraform v0.12 introduced a new first-class syntax. + +To update an expression like `list(a, b, c)`, write the following instead: + +``` +tolist([a, b, c]) +``` + +The `[ ... ]` brackets construct a tuple value, and the `tolist` function +then converts it to a list. For more information on the value types in the +Terraform language, see [Type Constraints](/docs/language/expressions/types.html). + +## Related Functions + +* [`concat`](./concat.html) produces a new list by concatenating together the + elements from other lists. +* [`tolist`](./tolist.html) converts a set or tuple value to a list. 
diff --git a/website/docs/configuration/functions/log.html.md b/website/docs/language/functions/log.html.md similarity index 73% rename from website/docs/configuration/functions/log.html.md rename to website/docs/language/functions/log.html.md index 86976e511..28ade0a7f 100644 --- a/website/docs/configuration/functions/log.html.md +++ b/website/docs/language/functions/log.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "log - Functions - Configuration Language" sidebar_current: "docs-funcs-numeric-log" description: |- @@ -8,10 +8,6 @@ description: |- # `log` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `log` returns the logarithm of a given number in a given base. ```hcl diff --git a/website/docs/configuration/functions/lookup.html.md b/website/docs/language/functions/lookup.html.md similarity index 72% rename from website/docs/configuration/functions/lookup.html.md rename to website/docs/language/functions/lookup.html.md index 9dae41afa..e41ece732 100644 --- a/website/docs/configuration/functions/lookup.html.md +++ b/website/docs/language/functions/lookup.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "lookup - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-lookup" description: |- @@ -8,12 +8,8 @@ description: |- # `lookup` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `lookup` retrieves the value of a single element from a map, given its key. -If the given key does not exist, a the given default value is returned instead. +If the given key does not exist, the given default value is returned instead. 
``` lookup(map, key, default) diff --git a/website/docs/configuration/functions/lower.html.md b/website/docs/language/functions/lower.html.md similarity index 74% rename from website/docs/configuration/functions/lower.html.md rename to website/docs/language/functions/lower.html.md index 15dd95eb6..a767859fd 100644 --- a/website/docs/configuration/functions/lower.html.md +++ b/website/docs/language/functions/lower.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "lower - Functions - Configuration Language" sidebar_current: "docs-funcs-string-lower" description: |- @@ -8,10 +8,6 @@ description: |- # `lower` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `lower` converts all cased letters in the given string to lowercase. ## Examples diff --git a/website/docs/language/functions/map.html.md b/website/docs/language/functions/map.html.md new file mode 100644 index 000000000..b05744d59 --- /dev/null +++ b/website/docs/language/functions/map.html.md @@ -0,0 +1,32 @@ +--- +layout: "language" +page_title: "map - Functions - Configuration Language" +sidebar_current: "docs-funcs-collection-map" +description: |- + The map function constructs a map from some given elements. +--- + +# `map` Function + +The `map` function is no longer available. Prior to Terraform v0.12 it was +the only available syntax for writing a literal map inside an expression, +but Terraform v0.12 introduced a new first-class syntax. + +To update an expression like `map("a", "b", "c", "d")`, write the following instead: + +``` +tomap({ + a = "b" + c = "d" +}) +``` + +The `{ ... }` braces construct an object value, and the `tomap` function +then converts it to a map. For more information on the value types in the +Terraform language, see [Type Constraints](/docs/language/expressions/types.html). 
+ +## Related Functions + +* [`tomap`](./tomap.html) converts an object value to a map. +* [`zipmap`](./zipmap.html) constructs a map dynamically, by taking keys from + one list and values from another list. diff --git a/website/docs/configuration/functions/matchkeys.html.md b/website/docs/language/functions/matchkeys.html.md similarity index 90% rename from website/docs/configuration/functions/matchkeys.html.md rename to website/docs/language/functions/matchkeys.html.md index 552fb0428..390e24a25 100644 --- a/website/docs/configuration/functions/matchkeys.html.md +++ b/website/docs/language/functions/matchkeys.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "matchkeys - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-matchkeys" description: |- @@ -9,10 +9,6 @@ description: |- # `matchkeys` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `matchkeys` constructs a new list by taking a subset of elements from one list whose indexes match the corresponding indexes of values in another list. diff --git a/website/docs/configuration/functions/max.html.md b/website/docs/language/functions/max.html.md similarity index 72% rename from website/docs/configuration/functions/max.html.md rename to website/docs/language/functions/max.html.md index 42e89d32a..33a458189 100644 --- a/website/docs/configuration/functions/max.html.md +++ b/website/docs/language/functions/max.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "max - Functions - Configuration Language" sidebar_current: "docs-funcs-numeric-max" description: |- @@ -8,10 +8,6 @@ description: |- # `max` Function --> **Note:** This page is about Terraform 0.12 and later. 
For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `max` takes one or more numbers and returns the greatest number from the set. ## Examples diff --git a/website/docs/configuration/functions/md5.html.md b/website/docs/language/functions/md5.html.md similarity index 81% rename from website/docs/configuration/functions/md5.html.md rename to website/docs/language/functions/md5.html.md index ba3935d48..67c9330eb 100644 --- a/website/docs/configuration/functions/md5.html.md +++ b/website/docs/language/functions/md5.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "md5 - Functions - Configuration Language" sidebar_current: "docs-funcs-crypto-md5" description: |- @@ -9,10 +9,6 @@ description: |- # `md5` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `md5` computes the MD5 hash of a given string and encodes it with hexadecimal digits. diff --git a/website/docs/configuration/functions/merge.html.md b/website/docs/language/functions/merge.html.md similarity index 82% rename from website/docs/configuration/functions/merge.html.md rename to website/docs/language/functions/merge.html.md index edcc0a18a..d01d551bb 100644 --- a/website/docs/configuration/functions/merge.html.md +++ b/website/docs/language/functions/merge.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "merge - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-merge" description: |- @@ -10,10 +10,6 @@ description: |- # `merge` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). 
- `merge` takes an arbitrary number of maps or objects, and returns a single map or object that contains a merged set of elements from all arguments. diff --git a/website/docs/configuration/functions/min.html.md b/website/docs/language/functions/min.html.md similarity index 72% rename from website/docs/configuration/functions/min.html.md rename to website/docs/language/functions/min.html.md index 5c1411a75..e9712dce1 100644 --- a/website/docs/configuration/functions/min.html.md +++ b/website/docs/language/functions/min.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "min - Functions - Configuration Language" sidebar_current: "docs-funcs-numeric-min" description: |- @@ -8,10 +8,6 @@ description: |- # `min` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `min` takes one or more numbers and returns the smallest number from the set. ## Examples diff --git a/website/docs/configuration/functions/parseint.html.md b/website/docs/language/functions/parseint.html.md similarity index 85% rename from website/docs/configuration/functions/parseint.html.md rename to website/docs/language/functions/parseint.html.md index 44c7a9cfa..f7ca35d55 100644 --- a/website/docs/configuration/functions/parseint.html.md +++ b/website/docs/language/functions/parseint.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "parseint - Functions - Configuration Language" sidebar_current: "docs-funcs-numeric-parseint" description: |- @@ -8,10 +8,6 @@ description: |- # `parseint` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). 
- `parseint` parses the given string as a representation of an integer in the specified base and returns the resulting number. The base must be between 2 and 62 inclusive. diff --git a/website/docs/configuration/functions/pathexpand.html.md b/website/docs/language/functions/pathexpand.html.md similarity index 89% rename from website/docs/configuration/functions/pathexpand.html.md rename to website/docs/language/functions/pathexpand.html.md index 48be103ad..d7fccc3b0 100644 --- a/website/docs/configuration/functions/pathexpand.html.md +++ b/website/docs/language/functions/pathexpand.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "pathexpand - Functions - Configuration Language" sidebar_current: "docs-funcs-file-pathexpand" description: |- @@ -9,10 +9,6 @@ description: |- # `pathexpand` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `pathexpand` takes a filesystem path that might begin with a `~` segment, and if so it replaces that segment with the current user's home directory path. diff --git a/website/docs/configuration/functions/pow.html.md b/website/docs/language/functions/pow.html.md similarity index 61% rename from website/docs/configuration/functions/pow.html.md rename to website/docs/language/functions/pow.html.md index 154e28404..baf73014b 100644 --- a/website/docs/configuration/functions/pow.html.md +++ b/website/docs/language/functions/pow.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "pow - Functions - Configuration Language" sidebar_current: "docs-funcs-numeric-pow" description: |- @@ -8,10 +8,6 @@ description: |- # `pow` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). 
- `pow` calculates an exponent, by raising its first argument to the power of the second argument. ## Examples diff --git a/website/docs/configuration/functions/range.html.md b/website/docs/language/functions/range.html.md similarity index 92% rename from website/docs/configuration/functions/range.html.md rename to website/docs/language/functions/range.html.md index 21bf4a3b4..bdb538ec8 100644 --- a/website/docs/configuration/functions/range.html.md +++ b/website/docs/language/functions/range.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "range - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-range" description: |- @@ -8,10 +8,6 @@ description: |- # `range` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `range` generates a list of numbers using a start value, a limit value, and a step value. diff --git a/website/docs/configuration/functions/regex.html.md b/website/docs/language/functions/regex.html.md similarity index 97% rename from website/docs/configuration/functions/regex.html.md rename to website/docs/language/functions/regex.html.md index 875ac65b5..790870b96 100644 --- a/website/docs/configuration/functions/regex.html.md +++ b/website/docs/language/functions/regex.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "regex - Functions - Configuration Language" sidebar_current: "docs-funcs-string-regex" description: |- @@ -9,10 +9,6 @@ description: |- # `regex` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `regex` applies a [regular expression](https://en.wikipedia.org/wiki/Regular_expression) to a string and returns the matching substrings. 
@@ -77,7 +73,7 @@ of the pattern must be escaped as `\\`. | `\PN` | The opposite of `\pN` | | `\P{Greek}` | The opposite of `\p{Greek}` | | `xy` | `x` followed immediately by `y` | -| `x|y` | either `x` or `y`, preferring `x` | +| `x\|y` | either `x` or `y`, preferring `x` | | `x*` | zero or more `x`, preferring more | | `x*?` | zero or more `x`, preferring fewer | | `x+` | one or more `x`, preferring more | diff --git a/website/docs/configuration/functions/regexall.html.md b/website/docs/language/functions/regexall.html.md similarity index 88% rename from website/docs/configuration/functions/regexall.html.md rename to website/docs/language/functions/regexall.html.md index fc8f495ca..5b5928d87 100644 --- a/website/docs/configuration/functions/regexall.html.md +++ b/website/docs/language/functions/regexall.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "regexall - Functions - Configuration Language" sidebar_current: "docs-funcs-string-regexall" description: |- @@ -8,10 +8,6 @@ description: |- # `regexall` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `regexall` applies a [regular expression](https://en.wikipedia.org/wiki/Regular_expression) to a string and returns a list of all matches. 
diff --git a/website/docs/configuration/functions/replace.html.md b/website/docs/language/functions/replace.html.md similarity index 83% rename from website/docs/configuration/functions/replace.html.md rename to website/docs/language/functions/replace.html.md index ea2dd7119..8475e26ae 100644 --- a/website/docs/configuration/functions/replace.html.md +++ b/website/docs/language/functions/replace.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "replace - Functions - Configuration Language" sidebar_current: "docs-funcs-string-replace" description: |- @@ -9,10 +9,6 @@ description: |- # `replace` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `replace` searches a given string for another given substring, and replaces each occurrence with a given replacement string. diff --git a/website/docs/configuration/functions/reverse.html.md b/website/docs/language/functions/reverse.html.md similarity index 69% rename from website/docs/configuration/functions/reverse.html.md rename to website/docs/language/functions/reverse.html.md index d9febecad..deeb3a177 100644 --- a/website/docs/configuration/functions/reverse.html.md +++ b/website/docs/language/functions/reverse.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "reverse - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-reverse" description: |- @@ -8,10 +8,6 @@ description: |- # `reverse` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `reverse` takes a sequence and produces a new sequence of the same length with all of the same elements as the given sequence but in reverse order. 
diff --git a/website/docs/configuration/functions/rsadecrypt.html.md b/website/docs/language/functions/rsadecrypt.html.md similarity index 82% rename from website/docs/configuration/functions/rsadecrypt.html.md rename to website/docs/language/functions/rsadecrypt.html.md index 6b09ffa6b..1cf0b04b4 100644 --- a/website/docs/configuration/functions/rsadecrypt.html.md +++ b/website/docs/language/functions/rsadecrypt.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "rsadecrypt - Functions - Configuration Language" sidebar_current: "docs-funcs-crypto-rsadecrypt" description: |- @@ -8,10 +8,6 @@ description: |- # `rsadecrypt` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `rsadecrypt` decrypts an RSA-encrypted ciphertext, returning the corresponding cleartext. diff --git a/website/docs/configuration/functions/setintersection.html.md b/website/docs/language/functions/setintersection.html.md similarity index 85% rename from website/docs/configuration/functions/setintersection.html.md rename to website/docs/language/functions/setintersection.html.md index 6444aeb8b..c582cc0cf 100644 --- a/website/docs/configuration/functions/setintersection.html.md +++ b/website/docs/language/functions/setintersection.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "setintersection - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-setintersection" description: |- @@ -9,10 +9,6 @@ description: |- # `setintersection` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). 
- The `setintersection` function takes multiple sets and produces a single set containing only the elements that all of the given sets have in common. In other words, it computes the diff --git a/website/docs/configuration/functions/setproduct.html.md b/website/docs/language/functions/setproduct.html.md similarity index 92% rename from website/docs/configuration/functions/setproduct.html.md rename to website/docs/language/functions/setproduct.html.md index 6829ce8e7..030ab2c32 100644 --- a/website/docs/configuration/functions/setproduct.html.md +++ b/website/docs/language/functions/setproduct.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "setproduct - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-setproduct" description: |- @@ -9,10 +9,6 @@ description: |- # `setproduct` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - The `setproduct` function finds all of the possible combinations of elements from all of the given sets by computing the [Cartesian product](https://en.wikipedia.org/wiki/Cartesian_product). @@ -121,9 +117,9 @@ elements all have a consistent type: ## Finding combinations for `for_each` The -[resource `for_each`](/docs/configuration/resources.html#for_each-multiple-resource-instances-defined-by-a-map-or-set-of-strings) +[resource `for_each`](/docs/language/meta-arguments/for_each.html) and -[`dynamic` block](/docs/configuration/expressions.html#dynamic-blocks) +[`dynamic` block](/docs/language/expressions/dynamic-blocks.html) language features both require a collection value that has one element for each repetition. 
@@ -208,7 +204,7 @@ resource "aws_subnet" "example" { vpc_id = each.value.network_id availability_zone = each.value.subnet_key - cidr_block = each.value_cidr_block + cidr_block = each.value.cidr_block } ``` diff --git a/website/docs/configuration/functions/setsubtract.html.md b/website/docs/language/functions/setsubtract.html.md similarity index 84% rename from website/docs/configuration/functions/setsubtract.html.md rename to website/docs/language/functions/setsubtract.html.md index 0bf3b7acc..8b95b203a 100644 --- a/website/docs/configuration/functions/setsubtract.html.md +++ b/website/docs/language/functions/setsubtract.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "setsubtract - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-setsubtract" description: |- @@ -9,10 +9,6 @@ description: |- # `setsubtract` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - The `setsubtract` function returns a new set containing the elements from the first set that are not present in the second set. In other words, it computes the [relative complement](https://en.wikipedia.org/wiki/Complement_(set_theory)#Relative_complement) of the first set in the second set. 
diff --git a/website/docs/configuration/functions/setunion.html.md b/website/docs/language/functions/setunion.html.md similarity index 84% rename from website/docs/configuration/functions/setunion.html.md rename to website/docs/language/functions/setunion.html.md index 41103e588..2784f5d87 100644 --- a/website/docs/configuration/functions/setunion.html.md +++ b/website/docs/language/functions/setunion.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "setunion - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-setunion" description: |- @@ -9,10 +9,6 @@ description: |- # `setunion` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - The `setunion` function takes multiple sets and produces a single set containing the elements from all of the given sets. In other words, it computes the [union](https://en.wikipedia.org/wiki/Union_(set_theory)) of diff --git a/website/docs/configuration/functions/sha1.html.md b/website/docs/language/functions/sha1.html.md similarity index 82% rename from website/docs/configuration/functions/sha1.html.md rename to website/docs/language/functions/sha1.html.md index df44b43a1..3b1b75930 100644 --- a/website/docs/configuration/functions/sha1.html.md +++ b/website/docs/language/functions/sha1.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "sha1 - Functions - Configuration Language" sidebar_current: "docs-funcs-crypto-sha1" description: |- @@ -9,10 +9,6 @@ description: |- # `sha1` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `sha1` computes the SHA1 hash of a given string and encodes it with hexadecimal digits. 
diff --git a/website/docs/configuration/functions/sha256.html.md b/website/docs/language/functions/sha256.html.md similarity index 82% rename from website/docs/configuration/functions/sha256.html.md rename to website/docs/language/functions/sha256.html.md index e044e7d51..c71157d84 100644 --- a/website/docs/configuration/functions/sha256.html.md +++ b/website/docs/language/functions/sha256.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "sha256 - Functions - Configuration Language" sidebar_current: "docs-funcs-crypto-sha256" description: |- @@ -9,10 +9,6 @@ description: |- # `sha256` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `sha256` computes the SHA256 hash of a given string and encodes it with hexadecimal digits. diff --git a/website/docs/configuration/functions/sha512.html.md b/website/docs/language/functions/sha512.html.md similarity index 82% rename from website/docs/configuration/functions/sha512.html.md rename to website/docs/language/functions/sha512.html.md index deb1ab3b4..b36c6bf9c 100644 --- a/website/docs/configuration/functions/sha512.html.md +++ b/website/docs/language/functions/sha512.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "sha512 - Functions - Configuration Language" sidebar_current: "docs-funcs-crypto-sha512" description: |- @@ -9,10 +9,6 @@ description: |- # `sha512` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `sha512` computes the SHA512 hash of a given string and encodes it with hexadecimal digits. 
diff --git a/website/docs/configuration/functions/signum.html.md b/website/docs/language/functions/signum.html.md similarity index 63% rename from website/docs/configuration/functions/signum.html.md rename to website/docs/language/functions/signum.html.md index 93d625069..e81365880 100644 --- a/website/docs/configuration/functions/signum.html.md +++ b/website/docs/language/functions/signum.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "signum - Functions - Configuration Language" sidebar_current: "docs-funcs-numeric-signum" description: |- @@ -8,10 +8,6 @@ description: |- # `signum` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `signum` determines the sign of a number, returning a number between -1 and 1 to represent the sign. diff --git a/website/docs/configuration/functions/slice.html.md b/website/docs/language/functions/slice.html.md similarity index 77% rename from website/docs/configuration/functions/slice.html.md rename to website/docs/language/functions/slice.html.md index 44c88f2ce..d6672386c 100644 --- a/website/docs/configuration/functions/slice.html.md +++ b/website/docs/language/functions/slice.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "slice - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-slice" description: |- @@ -8,10 +8,6 @@ description: |- # `slice` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `slice` extracts some consecutive elements from within a list. 
```hcl diff --git a/website/docs/configuration/functions/sort.html.md b/website/docs/language/functions/sort.html.md similarity index 72% rename from website/docs/configuration/functions/sort.html.md rename to website/docs/language/functions/sort.html.md index 598c035e7..6e50ae9f4 100644 --- a/website/docs/configuration/functions/sort.html.md +++ b/website/docs/language/functions/sort.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "sort - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-sort" description: |- @@ -9,10 +9,6 @@ description: |- # `sort` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `sort` takes a list of strings and returns a new list with those strings sorted lexicographically. diff --git a/website/docs/configuration/functions/split.html.md b/website/docs/language/functions/split.html.md similarity index 76% rename from website/docs/configuration/functions/split.html.md rename to website/docs/language/functions/split.html.md index 49b5ece5e..a777b7b5a 100644 --- a/website/docs/configuration/functions/split.html.md +++ b/website/docs/language/functions/split.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "split - Functions - Configuration Language" sidebar_current: "docs-funcs-string-split" description: |- @@ -9,10 +9,6 @@ description: |- # `split` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `split` produces a list by dividing a given string at all occurrences of a given separator. 
diff --git a/website/docs/configuration/functions/strrev.html.md b/website/docs/language/functions/strrev.html.md similarity index 96% rename from website/docs/configuration/functions/strrev.html.md rename to website/docs/language/functions/strrev.html.md index 630a68263..e2361bf58 100644 --- a/website/docs/configuration/functions/strrev.html.md +++ b/website/docs/language/functions/strrev.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "strrev - Functions - Configuration Language" sidebar_current: "docs-funcs-string-strrev" description: |- diff --git a/website/docs/configuration/functions/substr.html.md b/website/docs/language/functions/substr.html.md similarity index 71% rename from website/docs/configuration/functions/substr.html.md rename to website/docs/language/functions/substr.html.md index d9579d12a..21b3bbc31 100644 --- a/website/docs/configuration/functions/substr.html.md +++ b/website/docs/language/functions/substr.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "substr - Functions - Configuration Language" sidebar_current: "docs-funcs-string-substr" description: |- @@ -9,10 +9,6 @@ description: |- # `substr` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `substr` extracts a substring from a given string by offset and length. 
```hcl diff --git a/website/docs/configuration/functions/sum.html.md b/website/docs/language/functions/sum.html.md similarity index 62% rename from website/docs/configuration/functions/sum.html.md rename to website/docs/language/functions/sum.html.md index 20958974d..63c241843 100644 --- a/website/docs/configuration/functions/sum.html.md +++ b/website/docs/language/functions/sum.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "sum - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-sum" description: |- @@ -9,10 +9,6 @@ description: |- # `sum` Function --> **Note:** This page is about Terraform 0.13 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `sum` takes a list or set of numbers and returns the sum of those numbers. diff --git a/website/docs/configuration/functions/templatefile.html.md b/website/docs/language/functions/templatefile.html.md similarity index 83% rename from website/docs/configuration/functions/templatefile.html.md rename to website/docs/language/functions/templatefile.html.md index 64d7cf63c..95bc2b04f 100644 --- a/website/docs/configuration/functions/templatefile.html.md +++ b/website/docs/language/functions/templatefile.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "templatefile - Functions - Configuration Language" sidebar_current: "docs-funcs-file-templatefile" description: |- @@ -9,10 +9,6 @@ description: |- # `templatefile` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `templatefile` reads the file at the given path and renders its content as a template using a supplied set of template variables. 
@@ -21,10 +17,10 @@ templatefile(path, vars) ``` The template syntax is the same as for -[string templates](../expressions.html#string-templates) in the main Terraform -language, including interpolation sequences delimited with `${` ... `}`. -This function just allows longer template sequences to be factored out -into a separate file for readability. +[string templates](/docs/language/expressions/strings.html#string-templates) +in the main Terraform language, including interpolation sequences delimited with +`${` ... `}`. This function just allows longer template sequences to be factored +out into a separate file for readability. The "vars" argument must be a map. Within the template file, each of the keys in the map is available as a variable for interpolation. The template may @@ -78,8 +74,8 @@ The `templatefile` function renders the template: ``` > templatefile( - "${path.module}/backends.tmpl", - { + "${path.module}/config.tmpl", + { config = { "x" = "y" "foo" = "bar" @@ -102,7 +98,7 @@ interpolation sequences and directives. Instead, you can write a template that consists only of a single interpolated call to either [`jsonencode`](./jsonencode.html) or [`yamlencode`](./yamlencode.html), specifying the value to encode using -[normal Terraform expression syntax](/docs/configuration/expressions.html) +[normal Terraform expression syntax](/docs/language/expressions/index.html) as in the following examples: ``` @@ -122,9 +118,9 @@ this will produce a valid JSON or YAML representation of the given data structure, without the need to manually handle escaping or delimiters. In the latest examples above, the repetition based on elements of `ip_addrs` is achieved by using a -[`for` expression](/docs/configuration/expressions.html#for-expressions) +[`for` expression](/docs/language/expressions/for.html) rather than by using -[template directives](/docs/configuration/expressions.html#directives). +[template directives](/docs/language/expressions/strings.html#directives). 
```json {"backends":["10.0.0.1:8080","10.0.0.2:8080"]} diff --git a/website/docs/configuration/functions/textdecodebase64.html.md b/website/docs/language/functions/textdecodebase64.html.md similarity index 93% rename from website/docs/configuration/functions/textdecodebase64.html.md rename to website/docs/language/functions/textdecodebase64.html.md index 6d04b30c2..428dd5ca6 100644 --- a/website/docs/configuration/functions/textdecodebase64.html.md +++ b/website/docs/language/functions/textdecodebase64.html.md @@ -1,10 +1,10 @@ --- -layout: "functions" +layout: "language" page_title: "textdecodebase64 - Functions - Configuration Language" sidebar_current: "docs-funcs-encoding-textdecodebase64" description: |- The textdecodebase64 function decodes a string that was previously Base64-encoded, -and then interprets the result as characters in a specified character encoding. + and then interprets the result as characters in a specified character encoding. --- # `textdecodebase64` Function diff --git a/website/docs/configuration/functions/textencodebase64.html.md b/website/docs/language/functions/textencodebase64.html.md similarity index 95% rename from website/docs/configuration/functions/textencodebase64.html.md rename to website/docs/language/functions/textencodebase64.html.md index 02e436695..3eec900bf 100644 --- a/website/docs/configuration/functions/textencodebase64.html.md +++ b/website/docs/language/functions/textencodebase64.html.md @@ -1,10 +1,10 @@ --- -layout: "functions" +layout: "language" page_title: "textencodebase64 - Functions - Configuration Language" sidebar_current: "docs-funcs-encoding-textencodebase64" description: |- The textencodebase64 function encodes the unicode characters in a given string using a -specified character encoding, returning the result base64 encoded. + specified character encoding, returning the result base64 encoded. 
--- # `textencodebase64` Function diff --git a/website/docs/configuration/functions/timeadd.html.md b/website/docs/language/functions/timeadd.html.md similarity index 83% rename from website/docs/configuration/functions/timeadd.html.md rename to website/docs/language/functions/timeadd.html.md index 926806c66..5ba5800d4 100644 --- a/website/docs/configuration/functions/timeadd.html.md +++ b/website/docs/language/functions/timeadd.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "timeadd - Functions - Configuration Language" sidebar_current: "docs-funcs-datetime-timeadd" description: |- @@ -9,10 +9,6 @@ description: |- # `timeadd` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `timeadd` adds a duration to a timestamp, returning a new timestamp. ```hcl diff --git a/website/docs/configuration/functions/timestamp.html.md b/website/docs/language/functions/timestamp.html.md similarity index 82% rename from website/docs/configuration/functions/timestamp.html.md rename to website/docs/language/functions/timestamp.html.md index e3365406d..5ad73efd2 100644 --- a/website/docs/configuration/functions/timestamp.html.md +++ b/website/docs/language/functions/timestamp.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "timestamp - Functions - Configuration Language" sidebar_current: "docs-funcs-datetime-timestamp" description: |- @@ -9,10 +9,6 @@ description: |- # `timestamp` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `timestamp` returns a UTC timestamp string in [RFC 3339](https://tools.ietf.org/html/rfc3339) format. 
In the Terraform language, timestamps are conventionally represented as @@ -24,7 +20,7 @@ The result of this function will change every second, so using this function directly with resource attributes will cause a diff to be detected on every Terraform run. We do not recommend using this function in resource attributes, but in rare cases it can be used in conjunction with -[the `ignore_changes` lifecycle meta-argument](../resources.html#ignore_changes) +[the `ignore_changes` lifecycle meta-argument](/docs/language/meta-arguments/lifecycle.html#ignore_changes) to take the timestamp only on initial creation of the resource. For more stable time handling, see the [Time Provider](https://registry.terraform.io/providers/hashicorp/time/). diff --git a/website/docs/configuration/functions/title.html.md b/website/docs/language/functions/title.html.md similarity index 74% rename from website/docs/configuration/functions/title.html.md rename to website/docs/language/functions/title.html.md index 7027d48f1..fef24ddd4 100644 --- a/website/docs/configuration/functions/title.html.md +++ b/website/docs/language/functions/title.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "title - Functions - Configuration Language" sidebar_current: "docs-funcs-string-title" description: |- @@ -9,10 +9,6 @@ description: |- # `title` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `title` converts the first letter of each word in the given string to uppercase. 
## Examples diff --git a/website/docs/configuration/functions/tobool.html.md b/website/docs/language/functions/tobool.html.md similarity index 81% rename from website/docs/configuration/functions/tobool.html.md rename to website/docs/language/functions/tobool.html.md index cfb8c83c0..4ff160929 100644 --- a/website/docs/configuration/functions/tobool.html.md +++ b/website/docs/language/functions/tobool.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "tobool - Functions - Configuration Language" sidebar_current: "docs-funcs-conversion-tobool" description: |- @@ -8,10 +8,6 @@ description: |- # `tobool` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `tobool` converts its argument to a boolean value. Explicit type conversions are rarely necessary in Terraform because it will diff --git a/website/docs/configuration/functions/tolist.html.md b/website/docs/language/functions/tolist.html.md similarity index 81% rename from website/docs/configuration/functions/tolist.html.md rename to website/docs/language/functions/tolist.html.md index 3c114a761..682a7e538 100644 --- a/website/docs/configuration/functions/tolist.html.md +++ b/website/docs/language/functions/tolist.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "tolist - Functions - Configuration Language" sidebar_current: "docs-funcs-conversion-tolist" description: |- @@ -8,10 +8,6 @@ description: |- # `tolist` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `tolist` converts its argument to a list value. 
Explicit type conversions are rarely necessary in Terraform because it will diff --git a/website/docs/configuration/functions/tomap.html.md b/website/docs/language/functions/tomap.html.md similarity index 78% rename from website/docs/configuration/functions/tomap.html.md rename to website/docs/language/functions/tomap.html.md index 7adc81930..da28a4d89 100644 --- a/website/docs/configuration/functions/tomap.html.md +++ b/website/docs/language/functions/tomap.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "tomap - Functions - Configuration Language" sidebar_current: "docs-funcs-conversion-tomap" description: |- @@ -8,10 +8,6 @@ description: |- # `tomap` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `tomap` converts its argument to a map value. Explicit type conversions are rarely necessary in Terraform because it will diff --git a/website/docs/configuration/functions/tonumber.html.md b/website/docs/language/functions/tonumber.html.md similarity index 79% rename from website/docs/configuration/functions/tonumber.html.md rename to website/docs/language/functions/tonumber.html.md index d4e6927e5..1b7e0236b 100644 --- a/website/docs/configuration/functions/tonumber.html.md +++ b/website/docs/language/functions/tonumber.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "tonumber - Functions - Configuration Language" sidebar_current: "docs-funcs-conversion-tonumber" description: |- @@ -8,10 +8,6 @@ description: |- # `tonumber` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `tonumber` converts its argument to a number value. 
Explicit type conversions are rarely necessary in Terraform because it will diff --git a/website/docs/configuration/functions/toset.html.md b/website/docs/language/functions/toset.html.md similarity index 83% rename from website/docs/configuration/functions/toset.html.md rename to website/docs/language/functions/toset.html.md index 2a63f183d..0dc2e73c1 100644 --- a/website/docs/configuration/functions/toset.html.md +++ b/website/docs/language/functions/toset.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "toset - Functions - Configuration Language" sidebar_current: "docs-funcs-conversion-toset" description: |- @@ -8,10 +8,6 @@ description: |- # `toset` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `toset` converts its argument to a set value. Explicit type conversions are rarely necessary in Terraform because it will diff --git a/website/docs/configuration/functions/tostring.html.md b/website/docs/language/functions/tostring.html.md similarity index 78% rename from website/docs/configuration/functions/tostring.html.md rename to website/docs/language/functions/tostring.html.md index 2cec00906..667b3619a 100644 --- a/website/docs/configuration/functions/tostring.html.md +++ b/website/docs/language/functions/tostring.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "tostring - Functions - Configuration Language" sidebar_current: "docs-funcs-conversion-tostring" description: |- @@ -8,10 +8,6 @@ description: |- # `tostring` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `tostring` converts its argument to a string value. 
Explicit type conversions are rarely necessary in Terraform because it will diff --git a/website/docs/configuration/functions/transpose.html.md b/website/docs/language/functions/transpose.html.md similarity index 71% rename from website/docs/configuration/functions/transpose.html.md rename to website/docs/language/functions/transpose.html.md index a95b49b2d..dde38dac1 100644 --- a/website/docs/configuration/functions/transpose.html.md +++ b/website/docs/language/functions/transpose.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "transpose - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-transpose" description: |- @@ -9,10 +9,6 @@ description: |- # `transpose` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `transpose` takes a map of lists of strings and swaps the keys and values to produce a new map of lists of strings. diff --git a/website/docs/configuration/functions/trim.html.md b/website/docs/language/functions/trim.html.md similarity index 75% rename from website/docs/configuration/functions/trim.html.md rename to website/docs/language/functions/trim.html.md index f6402a83c..43f12f2af 100644 --- a/website/docs/configuration/functions/trim.html.md +++ b/website/docs/language/functions/trim.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "trim - Functions - Configuration Language" sidebar_current: "docs-funcs-string-trim" description: |- @@ -9,10 +9,6 @@ description: |- # `trim` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `trim` removes the specified characters from the start and end of the given string. 
diff --git a/website/docs/configuration/functions/trimprefix.html.md b/website/docs/language/functions/trimprefix.html.md similarity index 79% rename from website/docs/configuration/functions/trimprefix.html.md rename to website/docs/language/functions/trimprefix.html.md index cc9e8a0e6..e1058ffba 100644 --- a/website/docs/configuration/functions/trimprefix.html.md +++ b/website/docs/language/functions/trimprefix.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "trimprefix - Functions - Configuration Language" sidebar_current: "docs-funcs-string-trimprefix" description: |- @@ -9,10 +9,6 @@ description: |- # `trimprefix` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `trimprefix` removes the specified prefix from the start of the given string. If the string does not start with the prefix, the string is returned unchanged. ## Examples diff --git a/website/docs/configuration/functions/trimspace.html.md b/website/docs/language/functions/trimspace.html.md similarity index 75% rename from website/docs/configuration/functions/trimspace.html.md rename to website/docs/language/functions/trimspace.html.md index a7f4ad38f..b4f1a40df 100644 --- a/website/docs/configuration/functions/trimspace.html.md +++ b/website/docs/language/functions/trimspace.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "trimspace - Functions - Configuration Language" sidebar_current: "docs-funcs-string-trimspace" description: |- @@ -9,10 +9,6 @@ description: |- # `trimspace` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `trimspace` removes any space characters from the start and end of the given string. 
diff --git a/website/docs/configuration/functions/trimsuffix.html.md b/website/docs/language/functions/trimsuffix.html.md similarity index 76% rename from website/docs/configuration/functions/trimsuffix.html.md rename to website/docs/language/functions/trimsuffix.html.md index aec898687..727c31b55 100644 --- a/website/docs/configuration/functions/trimsuffix.html.md +++ b/website/docs/language/functions/trimsuffix.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "trimsuffix - Functions - Configuration Language" sidebar_current: "docs-funcs-string-trimsuffix" description: |- @@ -9,10 +9,6 @@ description: |- # `trimsuffix` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `trimsuffix` removes the specified suffix from the end of the given string. ## Examples diff --git a/website/docs/configuration/functions/try.html.md b/website/docs/language/functions/try.html.md similarity index 94% rename from website/docs/configuration/functions/try.html.md rename to website/docs/language/functions/try.html.md index 57644bb5b..d389a066e 100644 --- a/website/docs/configuration/functions/try.html.md +++ b/website/docs/language/functions/try.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "try - Functions - Configuration Language" sidebar_current: "docs-funcs-conversion-try" description: |- @@ -10,10 +10,6 @@ description: |- # `try` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `try` evaluates all of its argument expressions in turn and returns the result of the first one that does not produce any errors. 
diff --git a/website/docs/configuration/functions/upper.html.md b/website/docs/language/functions/upper.html.md similarity index 74% rename from website/docs/configuration/functions/upper.html.md rename to website/docs/language/functions/upper.html.md index 286bd0c86..c2fe37579 100644 --- a/website/docs/configuration/functions/upper.html.md +++ b/website/docs/language/functions/upper.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "upper - Functions - Configuration Language" sidebar_current: "docs-funcs-string-upper" description: |- @@ -8,10 +8,6 @@ description: |- # `upper` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `upper` converts all cased letters in the given string to uppercase. ## Examples diff --git a/website/docs/configuration/functions/urlencode.html.md b/website/docs/language/functions/urlencode.html.md similarity index 83% rename from website/docs/configuration/functions/urlencode.html.md rename to website/docs/language/functions/urlencode.html.md index 1f6fda45e..d5c396156 100644 --- a/website/docs/configuration/functions/urlencode.html.md +++ b/website/docs/language/functions/urlencode.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "urlencode - Functions - Configuration Language" sidebar_current: "docs-funcs-encoding-urlencode" description: |- @@ -8,10 +8,6 @@ description: |- # `urlencode` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `urlencode` applies URL encoding to a given string. 
This function identifies characters in the given string that would have a diff --git a/website/docs/configuration/functions/uuid.html.md b/website/docs/language/functions/uuid.html.md similarity index 65% rename from website/docs/configuration/functions/uuid.html.md rename to website/docs/language/functions/uuid.html.md index 439ec8232..b801bf666 100644 --- a/website/docs/configuration/functions/uuid.html.md +++ b/website/docs/language/functions/uuid.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "uuid - Functions - Configuration Language" sidebar_current: "docs-funcs-crypto-uuid" description: |- @@ -8,10 +8,6 @@ description: |- # `uuid` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `uuid` generates a unique identifier string. The id is a generated and formatted as required by @@ -23,13 +19,13 @@ This function produces a new value each time it is called, and so using it directly in resource arguments will result in spurious diffs. We do not recommend using the `uuid` function in resource configurations, but it can be used with care in conjunction with -[the `ignore_changes` lifecycle meta-argument](../resources.html#ignore_changes). +[the `ignore_changes` lifecycle meta-argument](/docs/language/meta-arguments/lifecycle.html#ignore_changes). -In most cases we recommend using [the `random` provider](/docs/providers/random/index.html) +In most cases we recommend using [the `random` provider](https://registry.terraform.io/providers/hashicorp/random/latest/docs) instead, since it allows the one-time generation of random values that are -then retained in the Terraform [state](/docs/state/index.html) for use by +then retained in the Terraform [state](/docs/language/state/index.html) for use by future operations. 
In particular, -[`random_id`](/docs/providers/random/r/id.html) can generate results with +[`random_id`](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/id) can generate results with equivalent randomness to the `uuid` function. ## Examples diff --git a/website/docs/configuration/functions/uuidv5.html.md b/website/docs/language/functions/uuidv5.html.md similarity index 92% rename from website/docs/configuration/functions/uuidv5.html.md rename to website/docs/language/functions/uuidv5.html.md index d48831821..24efce01f 100644 --- a/website/docs/configuration/functions/uuidv5.html.md +++ b/website/docs/language/functions/uuidv5.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "uuidv5 - Functions - Configuration Language" sidebar_current: "docs-funcs-crypto-uuidv5" description: |- @@ -8,10 +8,6 @@ description: |- # `uuidv5` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `uuidv5` generates a _name-based_ UUID, as described in [RFC 4122 section 4.3](https://tools.ietf.org/html/rfc4122#section-4.3), also known as a "version 5" UUID. diff --git a/website/docs/configuration/functions/values.html.md b/website/docs/language/functions/values.html.md similarity index 75% rename from website/docs/configuration/functions/values.html.md rename to website/docs/language/functions/values.html.md index a74aa0b03..4acc3772b 100644 --- a/website/docs/configuration/functions/values.html.md +++ b/website/docs/language/functions/values.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "values - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-values" description: |- @@ -8,10 +8,6 @@ description: |- # `values` Function --> **Note:** This page is about Terraform 0.12 and later. 
For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `values` takes a map and returns a list containing the values of the elements in that map. diff --git a/website/docs/configuration/functions/yamldecode.html.md b/website/docs/language/functions/yamldecode.html.md similarity index 92% rename from website/docs/configuration/functions/yamldecode.html.md rename to website/docs/language/functions/yamldecode.html.md index e276d8585..b24ae72aa 100644 --- a/website/docs/configuration/functions/yamldecode.html.md +++ b/website/docs/language/functions/yamldecode.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "yamldecode - Functions - Configuration Language" sidebar_current: "docs-funcs-encoding-yamldecode" description: |- @@ -9,10 +9,6 @@ description: |- # `yamldecode` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `yamldecode` parses a string as a subset of YAML, and produces a representation of its value. @@ -20,7 +16,7 @@ This function supports a subset of [YAML 1.2](https://yaml.org/spec/1.2/spec.htm as described below. 
This function maps YAML values to -[Terraform language values](../expressions.html#types-and-values) +[Terraform language values](/docs/language/expressions/types.html) in the following way: | YAML type | Terraform type | diff --git a/website/docs/configuration/functions/yamlencode.html.md b/website/docs/language/functions/yamlencode.html.md similarity index 91% rename from website/docs/configuration/functions/yamlencode.html.md rename to website/docs/language/functions/yamlencode.html.md index c562f854b..aa823dcde 100644 --- a/website/docs/configuration/functions/yamlencode.html.md +++ b/website/docs/language/functions/yamlencode.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "yamlencode - Functions - Configuration Language" sidebar_current: "docs-funcs-encoding-yamlencode" description: |- @@ -8,10 +8,6 @@ description: |- # `yamlencode` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `yamlencode` encodes a given value to a string using [YAML 1.2](https://yaml.org/spec/1.2/spec.html) block syntax. @@ -33,7 +29,7 @@ results are also valid YAML because YAML is a JSON superset. 
--> This function maps -[Terraform language values](../expressions.html#types-and-values) +[Terraform language values](/docs/language/expressions/types.html) to YAML tags in the following way: | Terraform type | YAML type | diff --git a/website/docs/configuration/functions/zipmap.html.md b/website/docs/language/functions/zipmap.html.md similarity index 79% rename from website/docs/configuration/functions/zipmap.html.md rename to website/docs/language/functions/zipmap.html.md index 61b203e92..d37926f74 100644 --- a/website/docs/configuration/functions/zipmap.html.md +++ b/website/docs/language/functions/zipmap.html.md @@ -1,5 +1,5 @@ --- -layout: "functions" +layout: "language" page_title: "zipmap - Functions - Configuration Language" sidebar_current: "docs-funcs-collection-zipmap" description: |- @@ -9,10 +9,6 @@ description: |- # `zipmap` Function --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Interpolation Syntax](../../configuration-0-11/interpolation.html). - `zipmap` constructs a map from a list of keys and a corresponding list of values. diff --git a/website/docs/language/index.html.md b/website/docs/language/index.html.md new file mode 100644 index 000000000..49067f802 --- /dev/null +++ b/website/docs/language/index.html.md @@ -0,0 +1,118 @@ +--- +layout: "language" +page_title: "Overview - Configuration Language" +--- + +# Terraform Language Documentation + +This is the documentation for Terraform's configuration language. It is relevant +to users of [Terraform CLI](/docs/cli/index.html), +[Terraform Cloud](/docs/cloud/index.html), and +[Terraform Enterprise](/docs/enterprise/index.html). + +> **Hands-on:** Try the [Terraform: Get Started](https://learn.hashicorp.com/collections/terraform/aws-get-started?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn. 
+ +_The Terraform language is Terraform's primary user interface._ In every edition +of Terraform, a configuration written in the Terraform language is always at the +heart of the workflow. + +## About the Terraform Language + +The main purpose of the Terraform language is declaring +[resources](/docs/language/resources/index.html), which represent infrastructure objects. All other +language features exist only to make the definition of resources more flexible +and convenient. + +A _Terraform configuration_ is a complete document in the Terraform language +that tells Terraform how to manage a given collection of infrastructure. A +configuration can consist of multiple files and directories. + +The syntax of the Terraform language consists of only a few basic elements: + +```hcl +resource "aws_vpc" "main" { + cidr_block = var.base_cidr_block +} + + "" "" { + # Block body + = # Argument +} +``` + +- _Blocks_ are containers for other content and usually represent the + configuration of some kind of object, like a resource. Blocks have a + _block type,_ can have zero or more _labels,_ and have a _body_ that contains + any number of arguments and nested blocks. Most of Terraform's features are + controlled by top-level blocks in a configuration file. +- _Arguments_ assign a value to a name. They appear within blocks. +- _Expressions_ represent a value, either literally or by referencing and + combining other values. They appear as values for arguments, or within other + expressions. + +The Terraform language is declarative, describing an intended goal rather than +the steps to reach that goal. The ordering of blocks and the files they are +organized into are generally not significant; Terraform only considers implicit +and explicit relationships between resources when determining an order of +operations. 
+ +### Example + +The following example describes a simple network topology for Amazon Web +Services, just to give a sense of the overall structure and syntax of the +Terraform language. Similar configurations can be created for other virtual +network services, using resource types defined by other providers, and a +practical network configuration will often contain additional elements not +shown here. + +```hcl +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 1.0.4" + } + } +} + +variable "aws_region" {} + +variable "base_cidr_block" { + description = "A /16 CIDR range definition, such as 10.1.0.0/16, that the VPC will use" + default = "10.1.0.0/16" +} + +variable "availability_zones" { + description = "A list of availability zones in which to create subnets" + type = list(string) +} + +provider "aws" { + region = var.aws_region +} + +resource "aws_vpc" "main" { + # Referencing the base_cidr_block variable allows the network address + # to be changed without modifying the configuration. + cidr_block = var.base_cidr_block +} + +resource "aws_subnet" "az" { + # Create one subnet for each given availability zone. + count = length(var.availability_zones) + + # For each subnet, use one of the specified availability zones. + availability_zone = var.availability_zones[count.index] + + # By referencing the aws_vpc.main object, Terraform knows that the subnet + # must be created only after the VPC is created. + vpc_id = aws_vpc.main.id + + # Built-in functions and operators can be used for simple transformations of + # values, such as computing a subnet address. Here we create a /20 prefix for + # each subnet, using consecutive addresses for each availability zone, + # such as 10.1.16.0/20 . 
+ cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 4, count.index+1) +} +``` + diff --git a/website/docs/language/meta-arguments/count.html.md b/website/docs/language/meta-arguments/count.html.md new file mode 100644 index 000000000..4e92bdb95 --- /dev/null +++ b/website/docs/language/meta-arguments/count.html.md @@ -0,0 +1,122 @@ +--- +layout: "language" +page_title: "The count Meta-Argument - Configuration Language" +--- + +# The `count` Meta-Argument + +-> **Version note:** Module support for `count` was added in Terraform 0.13, and +previous versions can only use it with resources. + +-> **Note:** A given resource or module block cannot use both `count` and `for_each`. + +> **Hands-on:** Try the [Manage Similar Resources With Count](https://learn.hashicorp.com/tutorials/terraform/count?in=terraform/0-13&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. + +By default, a [resource block](/docs/language/resources/syntax.html) configures one real +infrastructure object. (Similarly, a +[module block](/docs/language/modules/syntax.html) includes a +child module's contents into the configuration one time.) +However, sometimes you want to manage several similar objects (like a fixed +pool of compute instances) without writing a separate block for each one. +Terraform has two ways to do this: +`count` and [`for_each`](/docs/language/meta-arguments/for_each.html). + +If a resource or module block includes a `count` argument whose value is a whole number, +Terraform will create that many instances. + +## Basic Syntax + +`count` is a meta-argument defined by the Terraform language. It can be used +with modules and with every resource type. + +The `count` meta-argument accepts a whole number, and creates that many +instances of the resource or module. Each instance has a distinct infrastructure object +associated with it, and each is separately created, +updated, or destroyed when the configuration is applied. 
+ +```hcl +resource "aws_instance" "server" { + count = 4 # create four similar EC2 instances + + ami = "ami-a1b2c3d4" + instance_type = "t2.micro" + + tags = { + Name = "Server ${count.index}" + } +} +``` + +## The `count` Object + +In blocks where `count` is set, an additional `count` object is +available in expressions, so you can modify the configuration of each instance. +This object has one attribute: + +- `count.index` — The distinct index number (starting with `0`) corresponding + to this instance. + +## Using Expressions in `count` + +The `count` meta-argument accepts numeric [expressions](/docs/language/expressions/index.html). +However, unlike most arguments, the `count` value must be known +_before_ Terraform performs any remote resource actions. This means `count` +can't refer to any resource attributes that aren't known until after a +configuration is applied (such as a unique ID generated by the remote API when +an object is created). + +## Referring to Instances + +When `count` is set, Terraform distinguishes between the block itself +and the multiple _resource or module instances_ associated with it. Instances are +identified by an index number, starting with `0`. + +- `.` or `module.` (for example, `aws_instance.server`) refers to the resource block. +- `.[]` or `module.[]` (for example, `aws_instance.server[0]`, + `aws_instance.server[1]`, etc.) refers to individual instances. + +This is different from resources and modules without `count` or `for_each`, which can be +referenced without an index or key. + +Similarly, resources from child modules with multiple instances are prefixed +with `module.[]` when displayed in plan output and elsewhere in the UI. +For a module without `count` or `for_each`, the address will not contain +the module index as the module's name suffices to reference the module. 
+ +-> **Note:** Within nested `provisioner` or `connection` blocks, the special +`self` object refers to the current _resource instance,_ not the resource block +as a whole. + +## When to Use `for_each` Instead of `count` + +If your instances are almost identical, `count` is appropriate. If some +of their arguments need distinct values that can't be directly derived from an +integer, it's safer to use `for_each`. + +Before `for_each` was available, it was common to derive `count` from the +length of a list and use `count.index` to look up the original list value: + +```hcl +variable "subnet_ids" { + type = list(string) +} + +resource "aws_instance" "server" { + # Create one instance for each subnet + count = length(var.subnet_ids) + + ami = "ami-a1b2c3d4" + instance_type = "t2.micro" + subnet_id = var.subnet_ids[count.index] + + tags = { + Name = "Server ${count.index}" + } +} +``` + +This was fragile, because the resource instances were still identified by their +_index_ instead of the string values in the list. If an element was removed from +the middle of the list, every instance _after_ that element would see its +`subnet_id` value change, resulting in more remote object changes than intended. +Using `for_each` gives the same flexibility without the extra churn. diff --git a/website/docs/language/meta-arguments/depends_on.html.md b/website/docs/language/meta-arguments/depends_on.html.md new file mode 100644 index 000000000..fb8a35cc6 --- /dev/null +++ b/website/docs/language/meta-arguments/depends_on.html.md @@ -0,0 +1,76 @@ +--- +layout: "language" +page_title: "The depends_on Meta-Argument - Configuration Language" +--- + +# The `depends_on` Meta-Argument + +-> **Version note:** Module support for `depends_on` was added in Terraform 0.13, and +previous versions can only use it with resources. + +Use the `depends_on` meta-argument to handle hidden resource or module dependencies that +Terraform can't automatically infer. 
+ +Explicitly specifying a dependency is only necessary when a resource or module relies on +some other resource's behavior but _doesn't_ access any of that resource's data +in its arguments. + +This argument is available in `module` blocks and in all `resource` blocks, +regardless of resource type. For example: + +```hcl +resource "aws_iam_role" "example" { + name = "example" + + # assume_role_policy is omitted for brevity in this example. See the + # documentation for aws_iam_role for a complete example. + assume_role_policy = "..." +} + +resource "aws_iam_instance_profile" "example" { + # Because this expression refers to the role, Terraform can infer + # automatically that the role must be created first. + role = aws_iam_role.example.name +} + +resource "aws_iam_role_policy" "example" { + name = "example" + role = aws_iam_role.example.name + policy = jsonencode({ + "Statement" = [{ + # This policy allows software running on the EC2 instance to + # access the S3 API. + "Action" = "s3:*", + "Effect" = "Allow", + }], + }) +} + +resource "aws_instance" "example" { + ami = "ami-a1b2c3d4" + instance_type = "t2.micro" + + # Terraform can infer from this that the instance profile must + # be created before the EC2 instance. + iam_instance_profile = aws_iam_instance_profile.example + + # However, if software running in this EC2 instance needs access + # to the S3 API in order to boot properly, there is also a "hidden" + # dependency on the aws_iam_role_policy that Terraform cannot + # automatically infer, so it must be declared explicitly: + depends_on = [ + aws_iam_role_policy.example, + ] +} +``` + +The `depends_on` meta-argument, if present, must be a list of references +to other resources or child modules in the same calling module. +Arbitrary expressions are not allowed in the `depends_on` argument value, +because its value must be known before Terraform knows resource relationships +and thus before it can safely evaluate expressions. 
+ +The `depends_on` argument should be used only as a last resort. When using it, +always include a comment explaining why it is being used, to help future +maintainers understand the purpose of the additional dependency. + diff --git a/website/docs/language/meta-arguments/for_each.html.md b/website/docs/language/meta-arguments/for_each.html.md new file mode 100644 index 000000000..44b80a707 --- /dev/null +++ b/website/docs/language/meta-arguments/for_each.html.md @@ -0,0 +1,218 @@ +--- +layout: "language" +page_title: "The for_each Meta-Argument - Configuration Language" +--- + +# The `for_each` Meta-Argument + +-> **Version note:** `for_each` was added in Terraform 0.12.6. Module support +for `for_each` was added in Terraform 0.13, and previous versions can only use +it with resources. + +-> **Note:** A given resource or module block cannot use both `count` and `for_each`. + +> **Hands-on:** Try the [Manage Similar Resources With For Each](https://learn.hashicorp.com/tutorials/terraform/for-each?in=terraform/0-13&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. + +By default, a [resource block](/docs/language/resources/syntax.html) configures one real +infrastructure object (and similarly, a +[module block](/docs/language/modules/syntax.html) includes a +child module's contents into the configuration one time). +However, sometimes you want to manage several similar objects (like a fixed +pool of compute instances) without writing a separate block for each one. +Terraform has two ways to do this: +[`count`](/docs/language/meta-arguments/count.html) and `for_each`. + +If a resource or module block includes a `for_each` argument whose value is a map or +a set of strings, Terraform will create one instance for each member of +that map or set. + +## Basic Syntax + +`for_each` is a meta-argument defined by the Terraform language. It can be used +with modules and with every resource type. 
+ +The `for_each` meta-argument accepts a map or a set of strings, and creates an +instance for each item in that map or set. Each instance has a distinct +infrastructure object associated with it, and each is separately created, +updated, or destroyed when the configuration is applied. + +Map: + +```hcl +resource "azurerm_resource_group" "rg" { + for_each = { + a_group = "eastus" + another_group = "westus2" + } + name = each.key + location = each.value +} +``` + +Set of strings: + +```hcl +resource "aws_iam_user" "the-accounts" { + for_each = toset( ["Todd", "James", "Alice", "Dottie"] ) + name = each.key +} +``` + +Child module: + +```hcl +# my_buckets.tf +module "bucket" { + for_each = toset(["assets", "media"]) + source = "./publish_bucket" + name = "${each.key}_bucket" +} +``` + +```hcl +# publish_bucket/bucket-and-cloudfront.tf +variable "name" {} # this is the input parameter of the module + +resource "aws_s3_bucket" "example" { + # Because var.name includes each.key in the calling + # module block, its value will be different for + # each instance of this module. + bucket = var.name + + # ... +} + +resource "aws_iam_user" "deploy_user" { + # ... +} +``` + +## The `each` Object + +In blocks where `for_each` is set, an additional `each` object is +available in expressions, so you can modify the configuration of each instance. +This object has two attributes: + +- `each.key` — The map key (or set member) corresponding to this instance. +- `each.value` — The map value corresponding to this instance. (If a set was + provided, this is the same as `each.key`.) + +## Limitations on values used in `for_each` + +The keys of the map (or all the values in the case of a set of strings) must +be _known values_, or you will get an error message that `for_each` has dependencies +that cannot be determined before apply, and a `-target` may be needed. 
+ +`for_each` keys cannot be the result (or rely on the result of) of impure functions, +including `uuid`, `bcrypt`, or `timestamp`, as their evaluation is deferred during the +main evaluation step. + +Sensitive values, such as [sensitive input variables](https://www.terraform.io/docs/language/values/variables.html#suppressing-values-in-cli-output), +[sensitive outputs](https://www.terraform.io/docs/language/values/outputs.html#sensitive-suppressing-values-in-cli-output), +or [sensitive resource attributes](https://www.terraform.io/docs/language/expressions/references.html#sensitive-resource-attributes) +(if the `provider_sensitive_attrs` experiment is enabled), cannot be used as arguments +to `for_each`. The value used in `for_each` is used to identify the resource instance +and will always be disclosed in UI output, which is why sensitive values are not allowed. +Attempts to use sensitive values as `for_each` arguments will result in an error. + +If you transform a value containing sensitive data into an argument to be used in `for_each`, be aware that +[most functions in Terraform will return a sensitive result if given an argument with any sensitive content](https://www.terraform.io/docs/language/expressions/function-calls.html#using-sensitive-data-as-function-arguments). +In many cases, you can achieve similar results to a function used for this purpose by +using a `for` expression. For example, if you would like to call `keys(local.map)`, where +`local.map` is an object with sensitive values (but non-sensitive keys), you can create a +value to pass to `for_each` with `toset([for k,v in local.map : k])`. + +## Using Expressions in `for_each` + +The `for_each` meta-argument accepts map or set [expressions](/docs/language/expressions/index.html). +However, unlike most arguments, the `for_each` value must be known +_before_ Terraform performs any remote resource actions. 
This means `for_each`
+can't refer to any resource attributes that aren't known until after a
+configuration is applied (such as a unique ID generated by the remote API when
+an object is created).
+
+The `for_each` value must be a map or set with one element per desired
+resource instance. When providing a set, you must use an expression that
+explicitly returns a set value, like the [`toset`](/docs/language/functions/toset.html)
+function; to prevent unwanted surprises during conversion, the `for_each`
+argument does not implicitly convert lists or tuples to sets.
+If you need to declare resource instances based on a nested
+data structure or combinations of elements from multiple data structures you
+can use Terraform expressions and functions to derive a suitable value.
+For example:
+
+* Transform a multi-level nested structure into a flat list by
+  [using nested `for` expressions with the `flatten` function](/docs/language/functions/flatten.html#flattening-nested-structures-for-for_each).
+* Produce an exhaustive list of combinations of elements from two or more
+  collections by
+  [using the `setproduct` function inside a `for` expression](/docs/language/functions/setproduct.html#finding-combinations-for-for_each).
+
+## Referring to Instances
+
+When `for_each` is set, Terraform distinguishes between the block itself
+and the multiple _resource or module instances_ associated with it. Instances are
+identified by a map key (or set member) from the value provided to `for_each`.
+
+- `<TYPE>.<NAME>` or `module.<NAME>` (for example, `azurerm_resource_group.rg`) refers to the block.
+- `<TYPE>.<NAME>[<KEY>]` or `module.<NAME>[<KEY>]` (for example, `azurerm_resource_group.rg["a_group"]`,
+  `azurerm_resource_group.rg["another_group"]`, etc.) refers to individual instances.
+
+This is different from resources and modules without `count` or `for_each`, which can be
+referenced without an index or key. 
+
+Similarly, resources from child modules with multiple instances are prefixed
+with `module.<NAME>[<KEY>]` when displayed in plan output and elsewhere in the UI.
+For a module without `count` or `for_each`, the address will not contain
+the module index as the module's name suffices to reference the module.
+
+-> **Note:** Within nested `provisioner` or `connection` blocks, the special
+`self` object refers to the current _resource instance,_ not the resource block
+as a whole.
+
+## Using Sets
+
+The Terraform language doesn't have a literal syntax for
+[set values](/docs/language/expressions/type-constraints.html#collection-types), but you can use the `toset`
+function to explicitly convert a list of strings to a set:
+
+```hcl
+locals {
+  subnet_ids = toset([
+    "subnet-abcdef",
+    "subnet-012345",
+  ])
+}
+
+resource "aws_instance" "server" {
+  for_each = local.subnet_ids
+
+  ami = "ami-a1b2c3d4"
+  instance_type = "t2.micro"
+  subnet_id = each.key # note: each.key and each.value are the same for a set
+
+  tags = {
+    Name = "Server ${each.key}"
+  }
+}
+```
+
+Conversion from list to set discards the ordering of the items in the list and
+removes any duplicate elements. `toset(["b", "a", "b"])` will produce a set
+containing only `"a"` and `"b"` in no particular order; the second `"b"` is
+discarded. 
+ +If you are writing a module with an [input variable](/docs/language/values/variables.html) that +will be used as a set of strings for `for_each`, you can set its type to +`set(string)` to avoid the need for an explicit type conversion: + +```hcl +variable "subnet_ids" { + type = set(string) +} + +resource "aws_instance" "server" { + for_each = var.subnet_ids + + # (and the other arguments as above) +} +``` diff --git a/website/docs/language/meta-arguments/lifecycle.html.md b/website/docs/language/meta-arguments/lifecycle.html.md new file mode 100644 index 000000000..6671ff3b3 --- /dev/null +++ b/website/docs/language/meta-arguments/lifecycle.html.md @@ -0,0 +1,110 @@ +--- +layout: "language" +page_title: "The lifecycle Meta-Argument - Configuration Language" +--- + +# The `lifecycle` Meta-Argument + +The general lifecycle for resources is described in the +[Resource Behavior](/docs/language/resources/behavior.html) page. Some details of +that behavior can be customized using the special nested `lifecycle` block +within a resource block body: + +```hcl +resource "azurerm_resource_group" "example" { + # ... + + lifecycle { + create_before_destroy = true + } +} +``` + +## Syntax and Arguments + +`lifecycle` is a nested block that can appear within a resource block. +The `lifecycle` block and its contents are meta-arguments, available +for all `resource` blocks regardless of type. + +The following arguments can be used within a `lifecycle` block: + +* `create_before_destroy` (bool) - By default, when Terraform must change + a resource argument that cannot be updated in-place due to + remote API limitations, Terraform will instead destroy the existing object + and then create a new replacement object with the new configured arguments. + + The `create_before_destroy` meta-argument changes this behavior so that + the new replacement object is created _first,_ and the prior object + is destroyed after the replacement is created. 
+ + This is an opt-in behavior because many remote object types have unique + name requirements or other constraints that must be accommodated for + both a new and an old object to exist concurrently. Some resource types + offer special options to append a random suffix onto each object name to + avoid collisions, for example. Terraform CLI cannot automatically activate + such features, so you must understand the constraints for each resource + type before using `create_before_destroy` with it. + +* `prevent_destroy` (bool) - This meta-argument, when set to `true`, will + cause Terraform to reject with an error any plan that would destroy the + infrastructure object associated with the resource, as long as the argument + remains present in the configuration. + + This can be used as a measure of safety against the accidental replacement + of objects that may be costly to reproduce, such as database instances. + However, it will make certain configuration changes impossible to apply, + and will prevent the use of the `terraform destroy` command once such + objects are created, and so this option should be used sparingly. + + Since this argument must be present in configuration for the protection to + apply, note that this setting does not prevent the remote object from + being destroyed if the `resource` block were removed from configuration + entirely: in that case, the `prevent_destroy` setting is removed along + with it, and so Terraform will allow the destroy operation to succeed. + +* `ignore_changes` (list of attribute names) - By default, Terraform detects + any difference in the current settings of a real infrastructure object + and plans to update the remote object to match configuration. + + The `ignore_changes` feature is intended to be used when a resource is + created with references to data that may change in the future, but should + not affect said resource after its creation. 
In some rare cases, settings + of a remote object are modified by processes outside of Terraform, which + Terraform would then attempt to "fix" on the next run. In order to make + Terraform share management responsibilities of a single object with a + separate process, the `ignore_changes` meta-argument specifies resource + attributes that Terraform should ignore when planning updates to the + associated remote object. + + The arguments corresponding to the given attribute names are considered + when planning a _create_ operation, but are ignored when planning an + _update_. The arguments are the relative address of the attributes in the + resource. Map and list elements can be referenced using index notation, + like `tags["Name"]` and `list[0]` respectively. + + ```hcl + resource "aws_instance" "example" { + # ... + + lifecycle { + ignore_changes = [ + # Ignore changes to tags, e.g. because a management agent + # updates these based on some ruleset managed elsewhere. + tags, + ] + } + } + ``` + + Instead of a list, the special keyword `all` may be used to instruct + Terraform to ignore _all_ attributes, which means that Terraform can + create and destroy the remote object but will never propose updates to it. + + Only attributes defined by the resource type can be ignored. + `ignore_changes` cannot be applied to itself or to any other meta-arguments. + +## Literal Values Only + +The `lifecycle` settings all affect how Terraform constructs and traverses +the dependency graph. As a result, only literal values can be used because +the processing happens too early for arbitrary expression evaluation. 
diff --git a/website/docs/language/meta-arguments/module-providers.html.md b/website/docs/language/meta-arguments/module-providers.html.md new file mode 100644 index 000000000..99cd8e431 --- /dev/null +++ b/website/docs/language/meta-arguments/module-providers.html.md @@ -0,0 +1,123 @@ +--- +layout: "language" +page_title: "The Module providers Meta-Argument - Configuration Language" +--- + +# The Module `providers` Meta-Argument + +In a [module call](/docs/language/modules/syntax.html) block, the +optional `providers` meta-argument specifies which +[provider configurations](/docs/language/providers/configuration.html) from the parent +module will be available inside the child module. + +```hcl +# The default "aws" configuration is used for AWS resources in the root +# module where no explicit provider instance is selected. +provider "aws" { + region = "us-west-1" +} + +# An alternate configuration is also defined for a different +# region, using the alias "usw2". +provider "aws" { + alias = "usw2" + region = "us-west-2" +} + +# An example child module is instantiated with the alternate configuration, +# so any AWS resources it defines will use the us-west-2 region. +module "example" { + source = "./example" + providers = { + aws = aws.usw2 + } +} +``` + +## Default Behavior: Inherit Default Providers + +The `providers` argument is optional. If you omit it, a child module inherits +all of the _default_ provider configurations from its parent module. (Default +provider configurations are ones that don't use the `alias` argument.) + +If you specify a `providers` argument, it cancels this default behavior, and the +child module will _only_ have access to the provider configurations you specify. + +## Usage and Behavior + +The value of `providers` is a map, where: + +- The keys are the provider configuration names used inside the child module. +- The values are provider configuration names from the parent module. 
+
+Both keys and values should be unquoted references to provider configurations.
+For default configurations, this is the local name of the provider; for
+alternate configurations, this is a `<PROVIDER>.<ALIAS>` reference.
+
+Within a child module, resources are assigned to provider configurations as
+normal — either Terraform chooses a default based on the name of the resource
+type, or the resource specifies an alternate configuration with the `provider`
+argument. If the module receives a `providers` map when it's called, the
+provider configuration names used within the module are effectively remapped to
+refer to the specified configurations from the parent module.
+
+## When to Specify Providers
+
+There are two main reasons to use the `providers` argument:
+
+- Using different default provider configurations for a child module.
+- Configuring a module that requires multiple configurations of the same provider.
+
+### Changing Default Provider Configurations
+
+Most re-usable modules only use default provider configurations, which they can
+automatically inherit from their caller when `providers` is omitted.
+
+However, in Terraform configurations that use multiple configurations of the
+same provider, you might want some child modules to use the default provider
+configuration and other ones to use an alternate. (This usually happens when
+using one configuration to manage resources in multiple different regions of the
+same cloud provider.)
+
+By using the `providers` argument (like in the code example above), you can
+accommodate this without needing to edit the child module. Although the code
+within the child module always refers to the default provider configuration, the
+actual configuration of that default can be different for each instance.
+
+### Modules With Alternate Provider Configurations
+
+In rare cases, a single re-usable module might require multiple configurations
+of the same provider. 
For example, a module that configures connectivity between
+networks in two AWS regions is likely to need both a source and a destination
+region. In that case, the root module may look something like this:
+
+```hcl
+provider "aws" {
+  alias = "usw1"
+  region = "us-west-1"
+}
+
+provider "aws" {
+  alias = "usw2"
+  region = "us-west-2"
+}
+
+module "tunnel" {
+  source = "./tunnel"
+  providers = {
+    aws.src = aws.usw1
+    aws.dst = aws.usw2
+  }
+}
+```
+
+Non-default provider configurations are never automatically inherited, so any
+module that works like this will always need a `providers` argument. The
+documentation for the module should specify all of the provider configuration
+names it needs.
+
+## More Information for Module Developers
+
+For more details and guidance about working with providers inside a re-usable
+child module, see
+[Module Development: Providers Within Modules](/docs/language/modules/develop/providers.html).
diff --git a/website/docs/language/meta-arguments/resource-provider.html.md b/website/docs/language/meta-arguments/resource-provider.html.md
new file mode 100644
index 000000000..bdecf834c
--- /dev/null
+++ b/website/docs/language/meta-arguments/resource-provider.html.md
@@ -0,0 +1,58 @@
+---
+layout: "language"
+page_title: "The Resource provider Meta-Argument - Configuration Language"
+---
+
+# The Resource `provider` Meta-Argument
+
+The `provider` meta-argument specifies which provider configuration to use for a resource,
+overriding Terraform's default behavior of selecting one based on the resource
+type name. Its value should be an unquoted `<PROVIDER>.<ALIAS>` reference.
+
+As described in [Provider Configuration](/docs/language/providers/configuration.html), you can optionally
+create multiple configurations for a single provider (usually to manage
+resources in different regions of multi-region services). 
Each provider can have
+one default configuration, and any number of alternate configurations that
+include an extra name segment (or "alias").
+
+By default, Terraform interprets the initial word in the resource type name
+(separated by underscores) as the local name of a provider, and uses that
+provider's default configuration. For example, the resource type
+`google_compute_instance` is associated automatically with the default
+configuration for the provider named `google`.
+
+By using the `provider` meta-argument, you can select an alternate provider
+configuration for a resource:
+
+```hcl
+# default configuration
+provider "google" {
+  region = "us-central1"
+}
+
+# alternate configuration, whose alias is "europe"
+provider "google" {
+  alias = "europe"
+  region = "europe-west1"
+}
+
+resource "google_compute_instance" "example" {
+  # This "provider" meta-argument selects the google provider
+  # configuration whose alias is "europe", rather than the
+  # default configuration.
+  provider = google.europe
+
+  # ...
+}
+```
+
+A resource always has an implicit dependency on its associated provider, to
+ensure that the provider is fully configured before any resource actions
+are taken.
+
+The `provider` meta-argument expects
+[a `<PROVIDER>.<ALIAS>` reference](/docs/language/providers/configuration.html#referring-to-alternate-provider-configurations),
+which does not need to be quoted. Arbitrary expressions are not permitted for
+`provider` because it must be resolved while Terraform is constructing the
+dependency graph, before it is safe to evaluate expressions. 
+ diff --git a/website/docs/modules/composition.html.markdown b/website/docs/language/modules/develop/composition.html.md similarity index 94% rename from website/docs/modules/composition.html.markdown rename to website/docs/language/modules/develop/composition.html.md index 3a17deca4..96c2da685 100644 --- a/website/docs/modules/composition.html.markdown +++ b/website/docs/language/modules/develop/composition.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Module Composition" sidebar_current: "docs-modules-composition" description: |- @@ -9,12 +9,6 @@ description: |- # Module Composition --> This section is written for **Terraform v0.12 or later**. The general patterns - described in this section _do_ apply to earlier versions, but the examples - shown are using v0.12-only syntax and features. For general information - on module usage in prior versions, see - [the v0.11 documentation about modules](/docs/configuration-0-11/modules.html). - In a simple Terraform configuration with only one root module, we create a flat set of resources and use Terraform's expression syntax to describe the relationships between these resources: @@ -316,7 +310,7 @@ Most modules contain `resource` blocks and thus describe infrastructure to be created and managed. It may sometimes be useful to write modules that do not describe any new infrastructure at all, but merely retrieve information about existing infrastructure that was created elsewhere using -[data sources](/docs/configuration/data-sources.html). +[data sources](/docs/language/data-sources/index.html). 
As with conventional modules, we suggest using this technique only when the module raises the level of abstraction in some way, in this case by @@ -345,14 +339,14 @@ module "k8s_cluster" { The `network` module itself could retrieve this data in a number of different ways: it could query the AWS API directly using -[`aws_vpc`](/docs/providers/aws/d/vpc.html) +[`aws_vpc`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc) and -[`aws_subnet_ids`](/docs/providers/aws/d/subnet_ids.html) +[`aws_subnet_ids`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet_ids) data sources, or it could read saved information from a Consul cluster using -[`consul_keys`](https://www.terraform.io/docs/providers/consul/d/keys.html), +[`consul_keys`](https://registry.terraform.io/providers/hashicorp/consul/latest/docs/data-sources/keys), or it might read the outputs directly from the state of the configuration that manages the network using -[`terraform_remote_state`](https://www.terraform.io/docs/providers/terraform/d/remote_state.html). +[`terraform_remote_state`](https://www.terraform.io/docs/language/state/remote-state-data.html). The key benefit of this approach is that the source of this information can change over time without updating every configuration that depends on it. diff --git a/website/docs/language/modules/develop/index.html.md b/website/docs/language/modules/develop/index.html.md new file mode 100644 index 000000000..5e71edd59 --- /dev/null +++ b/website/docs/language/modules/develop/index.html.md @@ -0,0 +1,73 @@ +--- +layout: "language" +page_title: "Creating Modules" +sidebar_current: "docs-modules" +description: |- + A module is a container for multiple resources that are used together. 
+--- + +# Creating Modules + +> **Hands-on:** Try the [Reuse Configuration with Modules](https://learn.hashicorp.com/collections/terraform/modules?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn. + +A _module_ is a container for multiple resources that are used together. +Modules can be used to create lightweight abstractions, so that you can +describe your infrastructure in terms of its architecture, rather than +directly in terms of physical objects. + +The `.tf` files in your working directory when you run [`terraform plan`](/docs/cli/commands/plan.html) +or [`terraform apply`](/docs/cli/commands/apply.html) together form the _root_ +module. That module may [call other modules](/docs/language/modules/syntax.html#calling-a-child-module) +and connect them together by passing output values from one to input values +of another. + +To learn how to _use_ modules, see [the Modules configuration section](/docs/language/modules/index.html). +This section is about _creating_ re-usable modules that other configurations +can include using `module` blocks. + +## Module structure + +Re-usable modules are defined using all of the same +[configuration language](/docs/language/index.html) concepts we use in root modules. +Most commonly, modules use: + +* [Input variables](/docs/language/values/variables.html) to accept values from + the calling module. +* [Output values](/docs/language/values/outputs.html) to return results to the + calling module, which it can then use to populate arguments elsewhere. +* [Resources](/docs/language/resources/index.html) to define one or more + infrastructure objects that the module will manage. + +To define a module, create a new directory for it and place one or more `.tf` +files inside just as you would do for a root module. 
Terraform can load modules +either from local relative paths or from remote repositories; if a module will +be re-used by lots of configurations you may wish to place it in its own +version control repository. + +Modules can also call other modules using a `module` block, but we recommend +keeping the module tree relatively flat and using [module composition](./composition.html) +as an alternative to a deeply-nested tree of modules, because this makes +the individual modules easier to re-use in different combinations. + +## When to write a module + +In principle any combination of resources and other constructs can be factored +out into a module, but over-using modules can make your overall Terraform +configuration harder to understand and maintain, so we recommend moderation. + +A good module should raise the level of abstraction by describing a new concept +in your architecture that is constructed from resource types offered by +providers. + +For example, `aws_instance` and `aws_elb` are both resource types belonging to +the AWS provider. You might use a module to represent the higher-level concept +"[HashiCorp Consul](https://www.consul.io/) cluster running in AWS" which +happens to be constructed from these and other AWS provider resources. + +We _do not_ recommend writing modules that are just thin wrappers around single +other resource types. If you have trouble finding a name for your module that +isn't the same as the main resource type inside it, that may be a sign that +your module is not creating any new abstraction and so the module is +adding unnecessary complexity. Just use the resource type directly in the +calling module instead. 
+ diff --git a/website/docs/language/modules/develop/providers.html.md b/website/docs/language/modules/develop/providers.html.md new file mode 100644 index 000000000..4d71e3325 --- /dev/null +++ b/website/docs/language/modules/develop/providers.html.md @@ -0,0 +1,369 @@ +--- +layout: "language" +page_title: "Providers Within Modules - Configuration Language" +--- + +# Providers Within Modules + +[inpage-providers]: #providers-within-modules + +In a configuration with multiple modules, there are some special considerations +for how resources are associated with provider configurations. + +Each resource in the configuration must be associated with one provider +configuration. Provider configurations, unlike most other concepts in +Terraform, are global to an entire Terraform configuration and can be shared +across module boundaries. Provider configurations can be defined only in a +root Terraform module. + +Providers can be passed down to descendent modules in two ways: either +_implicitly_ through inheritance, or _explicitly_ via the `providers` argument +within a `module` block. These two options are discussed in more detail in the +following sections. + +A module intended to be called by one or more other modules must not contain +any `provider` blocks, with the exception of the special +"proxy provider blocks" discussed under +_[Passing Providers Explicitly](#passing-providers-explicitly)_ +below. + +For backward compatibility with configurations targeting Terraform v0.10 and +earlier Terraform does not produce an error for a `provider` block in a shared +module if the `module` block only uses features available in Terraform v0.10, +but that is a legacy usage pattern that is no longer recommended. A legacy +module containing its own provider configurations is not compatible with the +`for_each`, `count`, and `depends_on` arguments that were introduced in +Terraform v0.13. 
For more information, see +[Legacy Shared Modules with Provider Configurations](#legacy-shared-modules-with-provider-configurations). + +Provider configurations are used for all operations on associated resources, +including destroying remote objects and refreshing state. Terraform retains, as +part of its state, a reference to the provider configuration that was most +recently used to apply changes to each resource. When a `resource` block is +removed from the configuration, this record in the state will be used to locate +the appropriate configuration because the resource's `provider` argument +(if any) will no longer be present in the configuration. + +As a consequence, you must ensure that all resources that belong to a +particular provider configuration are destroyed before you can remove that +provider configuration's block from your configuration. If Terraform finds +a resource instance tracked in the state whose provider configuration block is +no longer available then it will return an error during planning, prompting you +to reintroduce the provider configuration. + +## Provider Version Constraints in Modules + +Although provider _configurations_ are shared between modules, each module must +declare its own [provider requirements](/docs/language/providers/requirements.html), so that +Terraform can ensure that there is a single version of the provider that is +compatible with all modules in the configuration and to specify the +[source address](/docs/language/providers/requirements.html#source-addresses) that serves as +the global (module-agnostic) identifier for a provider. 
+ +To declare that a module requires particular versions of a specific provider, +use a `required_providers` block inside a `terraform` block: + +```hcl +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 2.7.0" + } + } +} +``` + +A provider requirement says, for example, "This module requires version v2.7.0 +of the provider `hashicorp/aws` and will refer to it as `aws`." It doesn't, +however, specify any of the configuration settings that determine what remote +endpoints the provider will access, such as an AWS region; configuration +settings come from provider _configurations_, and a particular overall Terraform +configuration can potentially have +[several different configurations for the same provider](/docs/language/providers/configuration.html#alias-multiple-provider-configurations). + +If you are writing a shared Terraform module, constrain only the minimum +required provider version using a `>=` constraint. This should specify the +minimum version containing the features your module relies on, and thus allow a +user of your module to potentially select a newer provider version if other +features are needed by other parts of their overall configuration. + +## Implicit Provider Inheritance + +For convenience in simple configurations, a child module automatically inherits +default (un-aliased) provider configurations from its parent. This means that +explicit `provider` blocks appear only in the root module, and downstream +modules can simply declare resources for that provider and have them +automatically associated with the root provider configurations. 
+ +For example, the root module might contain only a `provider` block and a +`module` block to instantiate a child module: + +```hcl +provider "aws" { + region = "us-west-1" +} + +module "child" { + source = "./child" +} +``` + +The child module can then use any resource from this provider with no further +provider configuration required: + +```hcl +resource "aws_s3_bucket" "example" { + bucket = "provider-inherit-example" +} +``` + +We recommend using this approach when a single configuration for each provider +is sufficient for an entire configuration. + +~> **Note:** Only provider configurations are inherited by child modules, not provider source or version requirements. Each module must [declare its own provider requirements](/docs/language/providers/requirements.html). This is especially important for non-HashiCorp providers. + +In more complex situations there may be +[multiple provider configurations](/docs/language/providers/configuration.html#alias-multiple-provider-configurations), +or a child module may need to use different provider settings than +its parent. For such situations, you must pass providers explicitly. + +## Passing Providers Explicitly + +When child modules each need a different configuration of a particular +provider, or where the child module requires a different provider configuration +than its parent, you can use the `providers` argument within a `module` block +to explicitly define which provider configurations are available to the +child module. For example: + +```hcl +# The default "aws" configuration is used for AWS resources in the root +# module where no explicit provider instance is selected. +provider "aws" { + region = "us-west-1" +} + +# An alternate configuration is also defined for a different +# region, using the alias "usw2". 
+provider "aws" { + alias = "usw2" + region = "us-west-2" +} + +# An example child module is instantiated with the alternate configuration, +# so any AWS resources it defines will use the us-west-2 region. +module "example" { + source = "./example" + providers = { + aws = aws.usw2 + } +} +``` + +The `providers` argument within a `module` block is similar to +[the `provider` argument](/docs/language/meta-arguments/resource-provider.html) +within a resource, but is a map rather than a single string because a module may +contain resources from many different providers. + +The keys of the `providers` map are provider configuration names as expected by +the child module, and the values are the names of corresponding configurations +in the _current_ module. + +Once the `providers` argument is used in a `module` block, it overrides all of +the default inheritance behavior, so it is necessary to enumerate mappings +for _all_ of the required providers. This is to avoid confusion and surprises +that may result when mixing both implicit and explicit provider passing. + +Additional provider configurations (those with the `alias` argument set) are +_never_ inherited automatically by child modules, and so must always be passed +explicitly using the `providers` map. For example, a module +that configures connectivity between networks in two AWS regions is likely +to need both a source and a destination region. 
In that case, the root module +may look something like this: + +```hcl +provider "aws" { + alias = "usw1" + region = "us-west-1" +} + +provider "aws" { + alias = "usw2" + region = "us-west-2" +} + +module "tunnel" { + source = "./tunnel" + providers = { + aws.src = aws.usw1 + aws.dst = aws.usw2 + } +} +``` + +The subdirectory `./tunnel` must then contain _proxy configuration blocks_ like +the following, to declare that it requires its calling module to pass +configurations with these names in its `providers` argument: + +```hcl +provider "aws" { + alias = "src" +} + +provider "aws" { + alias = "dst" +} +``` + +Each resource should then have its own `provider` attribute set to either +`aws.src` or `aws.dst` to choose which of the two provider configurations to +use. + +## Proxy Configuration Blocks + +A proxy configuration block is one that contains only the `alias` argument. It +serves as a placeholder for provider configurations passed between modules, and +declares that a module expects to be explicitly passed an additional (aliased) +provider configuration. + +-> **Note:** Although a completely empty proxy configuration block is also +valid, it is not necessary: proxy configuration blocks are needed only to +establish which _aliased_ provider configurations a child module expects. +Don't use a proxy configuration block if a module only needs a single default +provider configuration, and don't use proxy configuration blocks only to imply +[provider requirements](/docs/language/providers/requirements.html). + +## Legacy Shared Modules with Provider Configurations + +In Terraform v0.10 and earlier there was no explicit way to use different +configurations of a provider in different modules in the same configuration, +and so module authors commonly worked around this by writing `provider` blocks +directly inside their modules, making the module have its own separate +provider configurations separate from those declared in the root module. 
+ +However, that pattern had a significant drawback: because a provider +configuration is required to destroy the remote object associated with a +resource instance as well as to create or update it, a provider configuration +must always stay present in the overall Terraform configuration for longer +than all of the resources it manages. If a particular module includes +both resources and the provider configurations for those resources then +removing the module from its caller would violate that constraint: both the +resources and their associated providers would, in effect, be removed +simultaneously. + +Terraform v0.11 introduced the mechanisms described in earlier sections to +allow passing provider configurations between modules in a structured way, and +thus we explicitly recommended against writing a child module with its own +provider configuration blocks. However, that legacy pattern continued to work +for compatibility purposes -- though with the same drawback -- until Terraform +v0.13. + +Terraform v0.13 introduced the possibility for a module itself to use the +`for_each`, `count`, and `depends_on` arguments, but the implementation of +those unfortunately conflicted with the support for the legacy pattern. + +To retain the backward compatibility as much as possible, Terraform v0.13 +continues to support the legacy pattern for module blocks that do not use these +new features, but a module with its own provider configurations is not +compatible with `for_each`, `count`, or `depends_on`. Terraform will produce an +error if you attempt to combine these features. For example: + +``` +Error: Module does not support count + + on main.tf line 15, in module "child": + 15: count = 2 + +Module "child" cannot be used with count because it contains a nested provider +configuration for "aws", at child/main.tf:2,10-15. 
+ +This module can be made compatible with count by changing it to receive all of +its provider configurations from the calling module, by using the "providers" +argument in the calling module block. +``` + +To make a module compatible with the new features, you must either remove all +of the `provider` blocks from its definition or, if you need multiple +configurations for the same provider, replace them with +_proxy configuration blocks_ as described in +[Passing Providers Explicitly](#passing-providers-explicitly). + +If the new version of the module uses proxy configuration blocks, or if the +calling module needs the child module to use different provider configurations +than its own default provider configurations, the calling module must then +include an explicit `providers` argument to describe which provider +configurations the child module will use: + +```hcl +provider "aws" { + region = "us-west-1" +} + +provider "aws" { + region = "us-east-1" + alias = "east" +} + +module "child" { + count = 2 + providers = { + # By default, the child module would use the + # default (unaliased) AWS provider configuration + # using us-west-1, but this will override it + # to use the additional "east" configuration + # for its resources instead. + aws = aws.east + } +} +``` + +Since the association between resources and provider configurations is +static, module calls using `for_each` or `count` cannot pass different +provider configurations to different instances. 
If you need different +instances of your module to use different provider configurations then you +must use a separate `module` block for each distinct set of provider +configurations: + +```hcl +provider "aws" { + alias = "usw1" + region = "us-west-1" +} + +provider "aws" { + alias = "usw2" + region = "us-west-2" +} + +provider "google" { + alias = "usw1" + credentials = "${file("account.json")}" + project = "my-project-id" + region = "us-west1" + zone = "us-west1-a" +} + +provider "google" { + alias = "usw2" + credentials = "${file("account.json")}" + project = "my-project-id" + region = "us-west2" + zone = "us-west2-a" +} + +module "bucket_w1" { + source = "./publish_bucket" + providers = { + aws.src = aws.usw1 + google.src = google.usw1 + } +} + +module "bucket_w2" { + source = "./publish_bucket" + providers = { + aws.src = aws.usw2 + google.src = google.usw2 + } +} +``` diff --git a/website/docs/modules/publish.html.markdown b/website/docs/language/modules/develop/publish.html.md similarity index 77% rename from website/docs/modules/publish.html.markdown rename to website/docs/language/modules/develop/publish.html.md index 1768b09de..93a17e3df 100644 --- a/website/docs/modules/publish.html.markdown +++ b/website/docs/language/modules/develop/publish.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Publishing Modules" sidebar_current: "docs-modules-publish" description: |- @@ -14,7 +14,7 @@ If you've built a module that you intend to be reused, we recommend your module, generate documentation, and more. Published modules can be easily consumed by Terraform, and users can -[constrain module versions](/docs/configuration/modules.html#module-versions) +[constrain module versions](/docs/language/modules/syntax.html#version) for safe and predictable updates. The following example shows how a caller might use a module from the Terraform Registry: @@ -32,11 +32,11 @@ the same benefits. 
Although the registry is the native mechanism for distributing re-usable modules, Terraform can also install modules from -[various other sources](/docs/modules/sources.html). The alternative sources +[various other sources](/docs/language/modules/sources.html). The alternative sources do not support the first-class versioning mechanism, but some sources have their own mechanisms for selecting particular VCS commits, etc. We recommend that modules distributed via other protocols still use the -[standard module structure](./#standard-module-structure) so that it can -be used in a similar way to a registry module, or even _become_ a registry -module at a later time. +[standard module structure](/docs/language/modules/develop/structure.html) so that they can +be used in a similar way as a registry module or be published on the registry +at a later time. diff --git a/website/docs/modules/index.html.markdown b/website/docs/language/modules/develop/structure.html.md similarity index 60% rename from website/docs/modules/index.html.markdown rename to website/docs/language/modules/develop/structure.html.md index c677d1fb9..6aed2b265 100644 --- a/website/docs/modules/index.html.markdown +++ b/website/docs/language/modules/develop/structure.html.md @@ -1,77 +1,9 @@ --- -layout: "docs" -page_title: "Creating Modules" -sidebar_current: "docs-modules" -description: |- - A module is a container for multiple resources that are used together. +layout: "language" +page_title: "Standard Module Structure" --- -# Creating Modules - -> **Hands-on:** Try the [Reuse Configuration with Modules](https://learn.hashicorp.com/collections/terraform/modules?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn. - -A _module_ is a container for multiple resources that are used together. 
-Modules can be used to create lightweight abstractions, so that you can -describe your infrastructure in terms of its architecture, rather than -directly in terms of physical objects. - -The `.tf` files in your working directory when you run [`terraform plan`](/docs/commands/plan.html) -or [`terraform apply`](/docs/commands/apply.html) together form the _root_ -module. That module may [call other modules](/docs/configuration/modules.html#calling-a-child-module) -and connect them together by passing output values from one to input values -of another. - -To learn how to _use_ modules, see [the Modules configuration section](/docs/configuration/modules.html). -This section is about _creating_ re-usable modules that other configurations -can include using `module` blocks. - -## Module structure - -Re-usable modules are defined using all of the same -[configuration language](/docs/configuration/) concepts we use in root modules. -Most commonly, modules use: - -* [Input variables](/docs/configuration/variables.html) to accept values from - the calling module. -* [Output values](/docs/configuration/outputs.html) to return results to the - calling module, which it can then use to populate arguments elsewhere. -* [Resources](/docs/configuration/resources.html) to define one or more - infrastructure objects that the module will manage. - -To define a module, create a new directory for it and place one or more `.tf` -files inside just as you would do for a root module. Terraform can load modules -either from local relative paths or from remote repositories; if a module will -be re-used by lots of configurations you may wish to place it in its own -version control repository. 
- -Modules can also call other modules using a `module` block, but we recommend -keeping the module tree relatively flat and using [module composition](./composition.html) -as an alternative to a deeply-nested tree of modules, because this makes -the individual modules easier to re-use in different combinations. - -## When to write a module - -In principle any combination of resources and other constructs can be factored -out into a module, but over-using modules can make your overall Terraform -configuration harder to understand and maintain, so we recommend moderation. - -A good module should raise the level of abstraction by describing a new concept -in your architecture that is constructed from resource types offered by -providers. - -For example, `aws_instance` and `aws_elb` are both resource types belonging to -the AWS provider. You might use a module to represent the higher-level concept -"[HashiCorp Consul](https://www.consul.io/) cluster running in AWS" which -happens to be constructed from these and other AWS provider resources. - -We _do not_ recommend writing modules that are just thin wrappers around single -other resource types. If you have trouble finding a name for your module that -isn't the same as the main resource type inside it, that may be a sign that -your module is not creating any new abstraction and so the module is -adding unnecessary complexity. Just use the resource type directly in the -calling module instead. - -## Standard Module Structure +# Standard Module Structure The standard module structure is a file and directory layout we recommend for reusable modules distributed in separate repositories. Terraform tooling is @@ -122,8 +54,8 @@ don't need to do any extra work to follow the standard structure. * **Variables and outputs should have descriptions.** All variables and outputs should have one or two sentence descriptions that explain their purpose. This is used for documentation. 
See the documentation for - [variable configuration](/docs/configuration/variables.html) and - [output configuration](/docs/configuration/outputs.html) for more details. + [variable configuration](/docs/language/values/variables.html) and + [output configuration](/docs/language/values/outputs.html) for more details. * **Nested modules**. Nested modules should exist under the `modules/` subdirectory. Any nested module with a `README.md` is considered usable diff --git a/website/docs/language/modules/index.html.md b/website/docs/language/modules/index.html.md new file mode 100644 index 000000000..2aef04ab3 --- /dev/null +++ b/website/docs/language/modules/index.html.md @@ -0,0 +1,68 @@ +--- +layout: "language" +page_title: "Modules Overview - Configuration Language" +--- + +# Modules + +> **Hands-on:** Try the [Reuse Configuration with Modules](https://learn.hashicorp.com/collections/terraform/modules?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn. + +_Modules_ are containers for multiple resources that are used together. A module +consists of a collection of `.tf` and/or `.tf.json` files kept together in a +directory. + +Modules are the main way to package and reuse resource configurations with +Terraform. + +## The Root Module + +Every Terraform configuration has at least one module, known as its +_root module_, which consists of the resources defined in the `.tf` files in +the main working directory. + +## Child Modules + +A Terraform module (usually the root module of a configuration) can _call_ other +modules to include their resources into the configuration. A module that has +been called by another module is often referred to as a _child module._ + +Child modules can be called multiple times within the same configuration, and +multiple configurations can use the same child module. 
+ +## Published Modules + +In addition to modules from the local filesystem, Terraform can load modules +from a public or private registry. This makes it possible to publish modules for +others to use, and to use modules that others have published. + +The [Terraform Registry](https://registry.terraform.io/browse/modules) hosts a +broad collection of publicly available Terraform modules for configuring many +kinds of common infrastructure. These modules are free to use, and Terraform can +download them automatically if you specify the appropriate source and version in +a module call block. + +Also, members of your organization might produce modules specifically crafted +for your own infrastructure needs. [Terraform Cloud](/docs/cloud/index.html) and +[Terraform Enterprise](/docs/enterprise/index.html) both include a private +module registry for sharing modules internally within your organization. + +## Using Modules + +- [Module Blocks](/docs/language/modules/syntax.html) documents the syntax for + calling a child module from a parent module, including meta-arguments like + `for_each`. + +- [Module Sources](/docs/language/modules/sources.html) documents what kinds of paths, + addresses, and URIs can be used in the `source` argument of a module block. + +- The Meta-Arguments section documents special arguments that can be used with + every module, including + [`providers`](/docs/language/meta-arguments/module-providers.html), + [`depends_on`](/docs/language/meta-arguments/depends_on.html), + [`count`](/docs/language/meta-arguments/count.html), + and [`for_each`](/docs/language/meta-arguments/for_each.html). + +## Developing Modules + +For information about developing reusable modules, see +[Module Development](/docs/language/modules/develop/index.html). 
diff --git a/website/docs/modules/sources.html.markdown b/website/docs/language/modules/sources.html.md similarity index 95% rename from website/docs/modules/sources.html.markdown rename to website/docs/language/modules/sources.html.md index afb6ecb83..c8e232909 100644 --- a/website/docs/modules/sources.html.markdown +++ b/website/docs/language/modules/sources.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Module Sources" sidebar_current: "docs-modules-sources" description: The source argument within a module block specifies the location of the source code of a child module. @@ -7,7 +7,7 @@ description: The source argument within a module block specifies the location of # Module Sources -The `source` argument in [a `module` block](/docs/configuration/modules.html) +The `source` argument in [a `module` block](/docs/language/modules/syntax.html) tells Terraform where to find the source code for the desired child module. Terraform uses this during the module installation step of `terraform init` @@ -116,19 +116,20 @@ module "consul" { ``` If you are using the SaaS version of Terraform Cloud, its private -registry hostname is `app.terraform.io`. If you are using a Terraform Enterprise -instance, its private registry hostname is the same hostname you use to -access the Terraform Cloud application. +registry hostname is `app.terraform.io`. If you use a self-hosted Terraform +Enterprise instance, its private registry hostname is the same as the host +where you'd access the web UI and the host you'd use when configuring +the `remote` backend. Registry modules support versioning. You can provide a specific version as shown in the above examples, or use flexible -[version constraints](/docs/configuration/modules.html#module-versions). +[version constraints](/docs/language/modules/syntax.html#version). You can learn more about the registry at the [Terraform Registry documentation](/docs/registry/modules/use.html#using-modules). 
To access modules from a private registry, you may need to configure an access -token [in the CLI config](/docs/commands/cli-config.html#credentials). Use the +token [in the CLI config](/docs/cli/config/config-file.html#credentials). Use the same hostname as used in the module source string. For a private registry within Terraform Cloud, use the same authentication token as you would use with the Enterprise API or command-line clients. @@ -183,7 +184,7 @@ a specific revision to install. Arbitrary Git repositories can be used by prefixing the address with the special `git::` prefix. After this prefix, any valid -[Git URL](https://git-scm.com/docs/git-clone#_git_urls_a_id_urls_a) +[Git URL](https://git-scm.com/docs/git-clone#_git_urls) can be specified to select one of the protocols supported by Git. For example, to use HTTPS or SSH: @@ -349,7 +350,7 @@ module "vpc" { ``` -> **Note:** If the content of the archive file is a directory, you will need to -include that directory in the module source. Read the section on +include that directory in the module source. Read the section on [Modules in Package Sub-directories](#modules-in-package-sub-directories) for more information. @@ -357,7 +358,7 @@ information. You can use archives stored in S3 as module sources using the special `s3::` prefix, followed by -[a path-style S3 bucket object URL](http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). +[an S3 bucket object URL](http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). 
```hcl module "consul" { diff --git a/website/docs/language/modules/syntax.html.md b/website/docs/language/modules/syntax.html.md new file mode 100644 index 000000000..9bbf78d9f --- /dev/null +++ b/website/docs/language/modules/syntax.html.md @@ -0,0 +1,197 @@ +--- +layout: "language" +page_title: "Modules - Configuration Language" +sidebar_current: "docs-config-modules" +description: |- + Modules allow multiple resources to be grouped together and encapsulated. +--- + +# Module Blocks + +> **Hands-on:** Try the [Reuse Configuration with Modules](https://learn.hashicorp.com/collections/terraform/modules?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn. + +A _module_ is a container for multiple resources that are used together. + +Every Terraform configuration has at least one module, known as its +_root module_, which consists of the resources defined in the `.tf` files in +the main working directory. + +A module can call other modules, which lets you include the child module's +resources into the configuration in a concise way. Modules +can also be called multiple times, either within the same configuration or +in separate configurations, allowing resource configurations to be packaged +and re-used. + +This page describes how to call one module from another. For more information +about creating re-usable child modules, see [Module Development](/docs/language/modules/develop/index.html). + +## Calling a Child Module + +To _call_ a module means to include the contents of that module into the +configuration with specific values for its +[input variables](/docs/language/values/variables.html). Modules are called +from within other modules using `module` blocks: + +```hcl +module "servers" { + source = "./app-cluster" + + servers = 5 +} +``` + +A module that includes a `module` block like this is the _calling module_ of the +child module. 
+ +The label immediately after the `module` keyword is a local name, which the +calling module can use to refer to this instance of the module. + +Within the block body (between `{` and `}`) are the arguments for the module. +Module calls use the following kinds of arguments: + +- The `source` argument is mandatory for all modules. + +- The `version` argument is recommended for modules from a registry. + +- Most other arguments correspond to [input variables](/docs/language/values/variables.html) + defined by the module. (The `servers` argument in the example above is one of + these.) + +- Terraform defines a few other meta-arguments that can be used with all + modules, including `for_each` and `depends_on`. + +### Source + +All modules **require** a `source` argument, which is a meta-argument defined by +Terraform. Its value is either the path to a local directory containing the +module's configuration files, or a remote module source that Terraform should +download and use. This value must be a literal string with no template +sequences; arbitrary expressions are not allowed. For more information on +possible values for this argument, see [Module Sources](/docs/language/modules/sources.html). + +The same source address can be specified in multiple `module` blocks to create +multiple copies of the resources defined within, possibly with different +variable values. + +After adding, removing, or modifying `module` blocks, you must re-run +`terraform init` to allow Terraform the opportunity to adjust the installed +modules. By default this command will not upgrade an already-installed module; +use the `-upgrade` option to instead upgrade to the newest available version. + +### Version + +When using modules installed from a module registry, we recommend explicitly +constraining the acceptable version numbers to avoid unexpected or unwanted +changes. 
+ +Use the `version` argument in the `module` block to specify versions: + +```hcl +module "consul" { + source = "hashicorp/consul/aws" + version = "0.0.5" + + servers = 3 +} +``` + +The `version` argument accepts a [version constraint string](/docs/language/expressions/version-constraints.html). +Terraform will use the newest installed version of the module that meets the +constraint; if no acceptable versions are installed, it will download the newest +version that meets the constraint. + +Version constraints are supported only for modules installed from a module +registry, such as the public [Terraform Registry](https://registry.terraform.io/) +or [Terraform Cloud's private module registry](/docs/cloud/registry/index.html). +Other module sources can provide their own versioning mechanisms within the +source string itself, or might not support versions at all. In particular, +modules sourced from local file paths do not support `version`; since +they're loaded from the same source repository, they always share the same +version as their caller. + +### Meta-arguments + +Along with `source` and `version`, Terraform defines a few more +optional meta-arguments that have special meaning across all modules, +described in more detail in the following pages: + +- `count` - Creates multiple instances of a module from a single `module` block. + See [the `count` page](/docs/language/meta-arguments/count.html) + for details. + +- `for_each` - Creates multiple instances of a module from a single `module` + block. See + [the `for_each` page](/docs/language/meta-arguments/for_each.html) + for details. + +- `providers` - Passes provider configurations to a child module. See + [the `providers` page](/docs/language/meta-arguments/module-providers.html) + for details. If not specified, the child module inherits all of the default + (un-aliased) provider configurations from the calling module. 
+ +- `depends_on` - Creates explicit dependencies between the entire + module and the listed targets. See + [the `depends_on` page](/docs/language/meta-arguments/depends_on.html) + for details. + +In addition to the above, the `lifecycle` argument is not currently used by +Terraform but is reserved for planned future features. + +## Accessing Module Output Values + +The resources defined in a module are encapsulated, so the calling module +cannot access their attributes directly. However, the child module can +declare [output values](/docs/language/values/outputs.html) to selectively +export certain values to be accessed by the calling module. + +For example, if the `./app-cluster` module referenced in the example above +exported an output value named `instance_ids` then the calling module +can reference that result using the expression `module.servers.instance_ids`: + +```hcl +resource "aws_elb" "example" { + # ... + + instances = module.servers.instance_ids +} +``` + +For more information about referring to named values, see +[Expressions](/docs/language/expressions/index.html). + +## Transferring Resource State Into Modules + +When refactoring an existing configuration to split code into child modules, +moving resource blocks between modules causes Terraform to see the new location +as an entirely different resource from the old. Always check the execution plan +after moving code across modules to ensure that no resources are deleted by +surprise. + +If you want to make sure an existing resource is preserved, use +[the `terraform state mv` command](/docs/cli/commands/state/mv.html) to inform +Terraform that it has moved to a different module. + +When passing resource addresses to `terraform state mv`, resources within child +modules must be prefixed with `module.<NAME>.`
 If a module was called with +[`count`](/docs/language/meta-arguments/count.html) or +[`for_each`](/docs/language/meta-arguments/for_each.html), +its resource addresses must be prefixed with `module.<NAME>[<KEY>].` +instead, where `<KEY>` matches the `count.index` or `each.key` value of a +particular module instance. + +Full resource addresses for module contents are used within the UI and on the +command line, but cannot be used within a Terraform configuration. Only +[outputs](/docs/language/values/outputs.html) from a module can be referenced from +elsewhere in your configuration. + +## Tainting resources within a module + +The [taint command](/docs/cli/commands/taint.html) can be used to _taint_ specific +resources within a module: + +```shell +$ terraform taint module.salt_master.aws_instance.salt_master +``` + +It is not possible to taint an entire module. Instead, each resource within +the module must be tainted separately. diff --git a/website/docs/configuration/providers.html.md b/website/docs/language/providers/configuration.html.md similarity index 88% rename from website/docs/configuration/providers.html.md rename to website/docs/language/providers/configuration.html.md index 4a4771b45..612e8f9d5 100644 --- a/website/docs/configuration/providers.html.md +++ b/website/docs/language/providers/configuration.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Provider Configuration - Configuration Language" sidebar_current: "docs-config-providers" description: |- @@ -8,10 +8,6 @@ description: |- # Provider Configuration --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Providers](../configuration-0-11/providers.html). - Terraform relies on plugins called "providers" to interact with remote systems. Terraform configurations must declare which providers they require, so that @@ -20,7 +16,7 @@ configuration (like endpoint URLs or cloud regions) before they can be used. 
- This page documents how to configure settings for providers. -- The [Provider Requirements](./provider-requirements.html) page documents how +- The [Provider Requirements](/docs/language/providers/requirements.html) page documents how to declare providers so Terraform can install them. ## Provider Configuration @@ -28,7 +24,8 @@ configuration (like endpoint URLs or cloud regions) before they can be used. Provider configurations belong in the root module of a Terraform configuration. (Child modules receive their provider configurations from the root module; for more information, see -[Providers Within Modules](./modules.html#providers-within-modules).) +[The Module `providers` Meta-Argument](/docs/language/meta-arguments/module-providers.html) +and [Module Development: Providers Within Modules](/docs/language/modules/develop/providers.html).) A provider configuration is created using a `provider` block: @@ -40,7 +37,7 @@ provider "google" { ``` The name given in the block header (`"google"` in this example) is the -[local name](./provider-requirements.html#local-names) of the provider to +[local name](/docs/language/providers/requirements.html#local-names) of the provider to configure. This provider should already be included in a `required_providers` block. @@ -49,7 +46,7 @@ the provider. Most arguments in this section are defined by the provider itself; in this example both `project` and `region` are specific to the `google` provider. -You can use [expressions](./expressions.html) in the values of these +You can use [expressions](/docs/language/expressions/index.html) in the values of these configuration arguments, but can only reference values that are known before the configuration is applied. 
This means you can safely reference input variables, but not attributes exported by resources (with an exception for resource @@ -71,7 +68,7 @@ and available for all `provider` blocks: - [`alias`, for using the same provider with different configurations for different resources][inpage-alias] - [`version`, which we no longer recommend][inpage-versions] (use - [provider requirements](./provider-requirements.html) instead) + [provider requirements](/docs/language/providers/requirements.html) instead) Unlike many other objects in the Terraform language, a `provider` block may be omitted if its contents would otherwise be empty. Terraform assumes an @@ -79,7 +76,7 @@ empty default configuration for any provider that is not explicitly configured. ## `alias`: Multiple Provider Configurations -[inpage-alias]: #alias-multiple-provider-instances +[inpage-alias]: #alias-multiple-provider-configurations You can optionally define multiple configurations for the same provider, and select which one to use on a per-resource or per-module basis. The primary @@ -160,7 +157,7 @@ module "aws_vpc" { ``` Modules have some special requirements when passing in providers; see -[Providers Within Modules](./modules.html#providers-within-modules) +[The Module `providers` Meta-Argument](/docs/language/meta-arguments/module-providers.html) for more details. In most cases, only _root modules_ should define provider configurations, with all child modules obtaining their provider configurations from their parents. @@ -173,13 +170,13 @@ from their parents. The `version` meta-argument specifies a version constraint for a provider, and works the same way as the `version` argument in a -[`required_providers` block](./provider-requirements.html). The version +[`required_providers` block](/docs/language/providers/requirements.html). The version constraint in a provider configuration is only used if `required_providers` does not include one for that provider. 
-**The `version` argument in provider configurations is deprecated.** +**The `version` argument in provider configurations is deprecated.** In Terraform 0.13 and later, version constraints should always be declared in -[the `required_providers` block](./provider-requirements.html). The `version` +[the `required_providers` block](/docs/language/providers/requirements.html). The `version` argument will be removed in a future version of Terraform. -> **Note:** The `version` meta-argument made sense before Terraform 0.13, since diff --git a/website/docs/language/providers/index.html.md b/website/docs/language/providers/index.html.md new file mode 100644 index 000000000..44f830101 --- /dev/null +++ b/website/docs/language/providers/index.html.md @@ -0,0 +1,122 @@ +--- +layout: "language" +page_title: "Providers - Configuration Language" +--- + +# Providers + +Terraform relies on plugins called "providers" to interact with remote systems. + +Terraform configurations must declare which providers they require so that +Terraform can install and use them. Additionally, some providers require +configuration (like endpoint URLs or cloud regions) before they can be used. + +## What Providers Do + +Each provider adds a set of [resource types](/docs/language/resources/index.html) +and/or [data sources](/docs/language/data-sources/index.html) that Terraform can +manage. + +Every resource type is implemented by a provider; without providers, Terraform +can't manage any kind of infrastructure. + +Most providers configure a specific infrastructure platform (either cloud or +self-hosted). Providers can also offer local utilities for tasks like +generating random numbers for unique resource names. + +## Where Providers Come From + +Providers are distributed separately from Terraform itself, and each provider +has its own release cadence and version numbers. 
+ +The [Terraform Registry](https://registry.terraform.io/browse/providers) +is the main directory of publicly available Terraform providers, and hosts +providers for most major infrastructure platforms. + +## How to Use Providers + +To use resources from a given provider, you need to include some information +about it in your configuration. See the following pages for details: + +- [Provider Requirements](/docs/language/providers/requirements.html) + documents how to declare providers so Terraform can install them. + +- [Provider Configuration](/docs/language/providers/configuration.html) + documents how to configure settings for providers. + +- [Dependency Lock File](/docs/language/dependency-lock.html) + documents an additional HCL file that can be included with a configuration, + which tells Terraform to always use a specific set of provider versions. + +## Provider Installation + +- Terraform Cloud and Terraform Enterprise install providers as part of every run. + +- Terraform CLI finds and installs providers when + [initializing a working directory](/docs/cli/init/index.html). It can + automatically download providers from a Terraform registry, or load them from + a local mirror or cache. If you are using a persistent working directory, you + must reinitialize whenever you change a configuration's providers. + + To save time and bandwidth, Terraform CLI supports an optional plugin + cache. You can enable the cache using the `plugin_cache_dir` setting in + [the CLI configuration file](/docs/cli/config/config-file.html). + +To ensure Terraform always installs the same provider versions for a given +configuration, you can use Terraform CLI to create a +[dependency lock file](/docs/language/dependency-lock.html) +and commit it to version control along with your configuration. If a lock file +is present, Terraform Cloud, CLI, and Enterprise will all obey it when +installing providers. 
+ +> **Hands-on:** Try the [Lock and Upgrade Provider Versions](https://learn.hashicorp.com/tutorials/terraform/provider-versioning?in=terraform/configuration-language&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. + +## How to Find Providers + +To find providers for the infrastructure platforms you use, browse +[the providers section of the Terraform Registry](https://registry.terraform.io/browse/providers). + +Some providers on the Registry are developed and published by HashiCorp, some +are published by platform maintainers, and some are published by users and +volunteers. The provider listings use the following badges to indicate who +develops and maintains a given provider. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TierDescriptionNamespace
Official providers are owned and maintained by HashiCorp hashicorp
Verified providers are owned and maintained by third-party technology partners. Providers in this tier indicate HashiCorp has verified the authenticity of the Provider’s publisher, and that the partner is a member of the HashiCorp Technology Partner Program.Third-party organization, e.g. mongodb/mongodbatlas
Community providers are published to the Terraform Registry by individual maintainers, groups of maintainers, or other members of the Terraform community.
Maintainer’s individual or organization account, e.g. DeviaVir/gsuite
Archived Providers are Official or Verified Providers that are no longer maintained by HashiCorp or the community. This may occur if an API is deprecated or interest was low.hashicorp or third-party
+ + +## How to Develop Providers + +Providers are written in Go, using the Terraform Plugin SDK. For more +information on developing providers, see: + +- The [Extending Terraform](/docs/extend/index.html) documentation +- The [Call APIs with Terraform Providers](https://learn.hashicorp.com/collections/terraform/providers?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) + collection on HashiCorp Learn diff --git a/website/docs/configuration/provider-requirements.html.md b/website/docs/language/providers/requirements.html.md similarity index 82% rename from website/docs/configuration/provider-requirements.html.md rename to website/docs/language/providers/requirements.html.md index c5e1c1ced..128192432 100644 --- a/website/docs/configuration/provider-requirements.html.md +++ b/website/docs/language/providers/requirements.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Provider Requirements - Configuration Language" --- @@ -7,8 +7,7 @@ page_title: "Provider Requirements - Configuration Language" -> **Note:** This page is about a feature of Terraform 0.13 and later; it also describes how to use the more limited version of that feature that was available -in Terraform 0.12. If you are using Terraform 0.11 or earlier, see -[0.11 Configuration Language: Provider Versions](../configuration-0-11/providers.html#provider-versions) instead. +in Terraform 0.12. Terraform relies on plugins called "providers" to interact with remote systems. @@ -18,56 +17,9 @@ configuration (like endpoint URLs or cloud regions) before they can be used. - This page documents how to declare providers so Terraform can install them. -- The [Provider Configuration](./providers.html) page documents how to configure +- The [Provider Configuration](/docs/language/providers/configuration.html) page documents how to configure settings for providers. -## About Providers - -Providers are plugins. 
They are released on a separate rhythm from Terraform -itself, and each provider has its own series of version numbers. - -Each provider plugin offers a set of -[resource types](resources.html#resource-types-and-arguments), and defines for -each resource type which arguments it accepts, which attributes it exports, and -how changes to resources of that type are actually applied to remote APIs. - -Most providers configure a specific infrastructure platform (either cloud or -self-hosted). Providers can also offer local utilities for tasks like -generating random numbers for unique resource names. - -The [Terraform Registry](https://registry.terraform.io/browse/providers) -is the main directory of publicly available Terraform providers, and hosts -providers for most major infrastructure platforms. You can also write and -distribute your own Terraform providers, for public or private use. - -> **Hands-on:** If you're interested in developing your own Terraform providers, try the [Call APIs with Terraform Providers](https://learn.hashicorp.com/collections/terraform/providers?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn. - -### Provider Installation - -Terraform finds and installs providers when -[initializing a working directory](/docs/commands/init.html). It can -automatically download providers from a Terraform registry, or load them from a -local mirror or cache. - -When you add a new provider to a configuration, Terraform must install the -provider in order to use it. If you are using a persistent working directory, -you can run `terraform init` again to install new providers. - -Providers downloaded by `terraform init` are only installed for the current -working directory; other working directories can have their own installed -provider plugins. 
To help ensure that each working directory will use the same -selected versions, `terraform init` records its version selections in -your configuration's [dependency lock file](dependency-lock.html), named -`.terraform.lock.hcl` and will always make those same selections unless -you run `terraform init -upgrade` to update them. - -To save time and bandwidth, Terraform supports an optional plugin cache. You can -enable the cache using the `plugin_cache_dir` setting in -[the CLI configuration file](/docs/commands/cli-config.html). - -For more information about provider installation, see -[the `terraform init` command](/docs/commands/init.html). - ## Requiring Providers Each Terraform module must declare which providers it requires, so that @@ -89,7 +41,7 @@ terraform { ``` The `required_providers` block must be nested inside the top-level -[`terraform` block](terraform.html) (which can also contain other settings). +[`terraform` block](/docs/language/settings/index.html) (which can also contain other settings). Each argument in the `required_providers` block enables one provider. The key determines the provider's [local name](#local-names) (its unique identifier @@ -126,7 +78,7 @@ Local names must be unique per-module. Outside of the `required_providers` block, Terraform configurations always refer to providers by their local names. For example, the following configuration declares `mycloud` as the local name for `mycorp/mycloud`, then uses that local -name when [configuring the provider](./providers.html): +name when [configuring the provider](/docs/language/providers/configuration.html): ```hcl terraform { @@ -236,12 +188,12 @@ terraform { # References to these providers elsewhere in the # module will use these compound local names. -provider "mycorp_http" { +provider "mycorp-http" { # ... } data "http" "example" { - provider = hashicorp_http + provider = hashicorp-http #... } ``` @@ -256,7 +208,7 @@ avoiding typing. 
Each provider plugin has its own set of available versions, allowing the functionality of the provider to evolve over time. Each provider dependency you -declare should have a [version constraint](./version-constraints.html) given in +declare should have a [version constraint](/docs/language/expressions/version-constraints.html) given in the `version` argument so Terraform can select a single version per provider that all modules are compatible with. @@ -264,6 +216,15 @@ The `version` argument is optional; if omitted, Terraform will accept any version of the provider as compatible. However, we strongly recommend specifying a version constraint for every provider your module depends on. +To ensure Terraform always installs the same provider versions for a given +configuration, you can use Terraform CLI to create a +[dependency lock file](/docs/language/dependency-lock.html) +and commit it to version control along with your configuration. If a lock file +is present, Terraform Cloud, CLI, and Enterprise will all obey it when +installing providers. + +> **Hands-on:** Try the [Lock and Upgrade Provider Versions](https://learn.hashicorp.com/tutorials/terraform/provider-versioning?in=terraform/configuration-language&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. + ### Best Practices for Provider Versions Each module should at least declare the minimum provider version it is known @@ -309,7 +270,7 @@ incompatibilities, and let the root module manage the maximum version. While most Terraform providers are distributed separately as plugins, there is currently one provider that is built in to Terraform itself, which provides -[the `terraform_remote_state` data source](/docs/providers/terraform/d/remote_state.html). +[the `terraform_remote_state` data source](/docs/language/state/remote-state-data.html). 
Because this provider is built in to Terraform, you don't need to declare it in the `required_providers` block in order to use its features. However, for @@ -342,7 +303,7 @@ registry, by implementing Running an additional service just to distribute a single provider internally may be undesirable, so Terraform also supports -[other provider installation methods](/docs/commands/cli-config.html#provider-installation), +[other provider installation methods](/docs/cli/config/config-file.html#provider-installation), including placing provider plugins directly in specific directories in the local filesystem, via _filesystem mirrors_. @@ -371,7 +332,7 @@ terraform { To make version 1.0.0 of this provider available for installation from the local filesystem, choose one of the -[implied local mirror directories](/docs/commands/cli-config.html#implied-local-mirror-directories) +[implied local mirror directories](/docs/cli/config/config-file.html#implied-local-mirror-directories) and create a directory structure under it like this: ``` diff --git a/website/docs/language/resources/behavior.html.md b/website/docs/language/resources/behavior.html.md new file mode 100644 index 000000000..70c0e2da7 --- /dev/null +++ b/website/docs/language/resources/behavior.html.md @@ -0,0 +1,108 @@ +--- +layout: "language" +page_title: "Resource Behavior - Configuration Language" +--- + +# Resource Behavior + +A `resource` block declares that you want a particular infrastructure object +to exist with the given settings. If you are writing a new configuration for +the first time, the resources it defines will exist _only_ in the configuration, +and will not yet represent real infrastructure objects in the target platform. + +_Applying_ a Terraform configuration is the process of creating, updating, +and destroying real infrastructure objects in order to make their settings +match the configuration. 
+ +## How Terraform Applies a Configuration + +When Terraform creates a new infrastructure object represented by a `resource` +block, the identifier for that real object is saved in Terraform's +[state](/docs/language/state/index.html), allowing it to be updated and destroyed +in response to future changes. For resource blocks that already have an +associated infrastructure object in the state, Terraform compares the +actual configuration of the object with the arguments given in the +configuration and, if necessary, updates the object to match the configuration. + +In summary, applying a Terraform configuration will: + +- _Create_ resources that exist in the configuration but are not associated with a real infrastructure object in the state. +- _Destroy_ resources that exist in the state but no longer exist in the configuration. +- _Update in-place_ resources whose arguments have changed. +- _Destroy and re-create_ resources whose arguments have changed but which cannot be updated in-place due to remote API limitations. + +This general behavior applies for all resources, regardless of type. The +details of what it means to create, update, or destroy a resource are different +for each resource type, but this standard set of verbs is common across them +all. + +The meta-arguments within `resource` blocks, documented in the +sections below, allow some details of this standard resource behavior to be +customized on a per-resource basis. + +## Accessing Resource Attributes + +[Expressions](/docs/language/expressions/index.html) within a Terraform module can access +information about resources in the same module, and you can use that information +to help configure other resources. Use the `<RESOURCE TYPE>.<NAME>.<ATTRIBUTE>` +syntax to reference a resource attribute in an expression.
+ +In addition to arguments specified in the configuration, resources often provide +read-only attributes with information obtained from the remote API; this often +includes things that can't be known until the resource is created, like the +resource's unique random ID. + +Many providers also include [data sources](/docs/language/data-sources/index.html), +which are a special type of resource used only for looking up information. + +For a list of the attributes a resource or data source type provides, consult +its documentation; these are generally included in a second list below its list +of configurable arguments. + +For more information about referencing resource attributes in expressions, see +[Expressions: References to Resource Attributes](/docs/language/expressions/references.html#references-to-resource-attributes). + +## Resource Dependencies + +Most resources in a configuration don't have any particular relationship, and +Terraform can make changes to several unrelated resources in parallel. + +However, some resources must be processed after other specific resources; +sometimes this is because of how the resource works, and sometimes the +resource's configuration just requires information generated by another +resource. + +Most resource dependencies are handled automatically. Terraform analyses any +[expressions](/docs/language/expressions/index.html) within a `resource` block to find references +to other objects, and treats those references as implicit ordering requirements +when creating, updating, or destroying resources. Since most resources with +behavioral dependencies on other resources also refer to those resources' data, +it's usually not necessary to manually specify dependencies between resources. + +However, some dependencies cannot be recognized implicitly in configuration. 
For +example, if Terraform must manage access control policies _and_ take actions +that require those policies to be present, there is a hidden dependency between +the access policy and a resource whose creation depends on it. In these rare +cases, +[the `depends_on` meta-argument](/docs/language/meta-arguments/depends_on.html) +can explicitly specify a dependency. + +## Local-only Resources + +While most resource types correspond to an infrastructure object type that +is managed via a remote network API, there are certain specialized resource +types that operate only within Terraform itself, calculating some results and +saving those results in the state for future use. + +For example, local-only resource types exist for +[generating private keys](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key), +[issuing self-signed TLS certificates](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/self_signed_cert), +and even [generating random ids](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/id). +While these resource types often have a more marginal purpose than those +managing "real" infrastructure objects, they can be useful as glue to help +connect together other resources. + +The behavior of local-only resources is the same as all other resources, but +their result data exists only within the Terraform state. "Destroying" such +a resource means only to remove it from the state, discarding its data. 
+ diff --git a/website/docs/language/resources/index.html.md b/website/docs/language/resources/index.html.md new file mode 100644 index 000000000..891202d34 --- /dev/null +++ b/website/docs/language/resources/index.html.md @@ -0,0 +1,34 @@ +--- +layout: "language" +page_title: "Resources Overview - Configuration Language" +--- + +# Resources + +> **Hands-on:** Try the [Terraform: Get Started](https://learn.hashicorp.com/collections/terraform/aws-get-started?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn. + +_Resources_ are the most important element in the Terraform language. +Each resource block describes one or more infrastructure objects, such +as virtual networks, compute instances, or higher-level components such +as DNS records. + +- [Resource Blocks](/docs/language/resources/syntax.html) documents + the syntax for declaring resources. + +- [Resource Behavior](/docs/language/resources/behavior.html) explains in + more detail how Terraform handles resource declarations when applying a + configuration. + +- The Meta-Arguments section documents special arguments that can be used with + every resource type, including + [`depends_on`](/docs/language/meta-arguments/depends_on.html), + [`count`](/docs/language/meta-arguments/count.html), + [`for_each`](/docs/language/meta-arguments/for_each.html), + [`provider`](/docs/language/meta-arguments/resource-provider.html), + and [`lifecycle`](/docs/language/meta-arguments/lifecycle.html). + +- [Provisioners](/docs/language/resources/provisioners/index.html) + documents configuring post-creation actions for a resource using the + `provisioner` and `connection` blocks. Since provisioners are non-declarative + and potentially unpredictable, we strongly recommend that you treat them as a + last resort. 
diff --git a/website/docs/provisioners/chef.html.markdown b/website/docs/language/resources/provisioners/chef.html.md similarity index 92% rename from website/docs/provisioners/chef.html.markdown rename to website/docs/language/resources/provisioners/chef.html.md index 87371b65c..e420b9a38 100644 --- a/website/docs/provisioners/chef.html.markdown +++ b/website/docs/language/resources/provisioners/chef.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Provisioner: chef" sidebar_current: "docs-provisioners-chef" description: |- @@ -10,12 +10,9 @@ description: |- The `chef` provisioner installs, configures and runs the Chef Client on a remote resource. The `chef` provisioner supports both `ssh` and `winrm` type -[connections](/docs/provisioners/connection.html). +[connections](/docs/language/resources/provisioners/connection.html). -!> **Note:** This provisioner has been deprecated as of Terraform 0.13.4 and will be -removed in a future version of Terraform. For most common situations there are better -alternatives to using provisioners. For more information, see -[the main Provisioners page](./). +!> **Note:** This provisioner was removed in the 0.15.0 version of Terraform after being deprecated as of Terraform 0.13.4. For most common situations there are better alternatives to using provisioners. For more information, see [the main Provisioners page](./). ## Requirements @@ -71,7 +68,7 @@ The following arguments are supported: * `attributes_json (string)` - (Optional) A raw JSON string with initial node attributes for the new node. These can also be loaded from a file on disk using - [the `file` function](/docs/configuration/functions/file.html). + [the `file` function](/docs/language/functions/file.html). * `channel (string)` - (Optional) The Chef Client release channel to install from. If not set, the `stable` channel will be used. 
@@ -130,7 +127,7 @@ The following arguments are supported: * `prevent_sudo (boolean)` - (Optional) Prevent the use of the `sudo` command while installing, configuring and running the initial Chef Client run. This option is only used with `ssh` type - [connections](/docs/provisioners/connection.html). + [connections](/docs/language/resources/provisioners/connection.html). * `recreate_client (boolean)` - (Optional) If `true`, first delete any existing Chef Node and Client before registering the new Chef Client. @@ -148,7 +145,7 @@ The following arguments are supported: * `secret_key (string)` - (Optional) The contents of the secret key that is used by the Chef Client to decrypt data bags on the Chef Server. The key will be uploaded to the remote machine. This can also be loaded from a file on disk using - [the `file` function](/docs/configuration/functions/file.html). + [the `file` function](/docs/language/functions/file.html). * `server_url (string)` - (Required) The URL to the Chef server. This includes the path to the organization. See the example. @@ -170,11 +167,11 @@ The following arguments are supported: * `user_key (string)` - (Required) The contents of the user key that will be used to authenticate with the Chef Server. This can also be loaded from a file on disk using - [the `file` function](/docs/configuration/functions/file.html). + [the `file` function](/docs/language/functions/file.html). * `vault_json (string)` - (Optional) A raw JSON string with Chef Vaults and Items to which the new node should have access. These can also be loaded from a file on disk using - [the `file` function](/docs/configuration/functions/file.html). + [the `file` function](/docs/language/functions/file.html). * `version (string)` - (Optional) The Chef Client version to install on the remote machine. If not set, the latest available version will be installed. 
diff --git a/website/docs/provisioners/connection.html.markdown b/website/docs/language/resources/provisioners/connection.html.md similarity index 88% rename from website/docs/provisioners/connection.html.markdown rename to website/docs/language/resources/provisioners/connection.html.md index f290a9829..698ddfad7 100644 --- a/website/docs/provisioners/connection.html.markdown +++ b/website/docs/language/resources/provisioners/connection.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Provisioner Connection Settings" sidebar_current: "docs-provisioners-connection" description: |- @@ -108,12 +108,12 @@ block would create a dependency cycle. * `private_key` - The contents of an SSH key to use for the connection. These can be loaded from a file on disk using - [the `file` function](/docs/configuration/functions/file.html). This takes + [the `file` function](/docs/language/functions/file.html). This takes preference over the password if provided. * `certificate` - The contents of a signed CA Certificate. The certificate argument must be used in conjunction with a `private_key`. These can - be loaded from a file on disk using the [the `file` function](/docs/configuration/functions/file.html). + be loaded from a file on disk using the [the `file` function](/docs/language/functions/file.html). * `agent` - Set to `false` to disable using `ssh-agent` to authenticate. On Windows the only supported SSH authentication agent is @@ -123,6 +123,9 @@ block would create a dependency cycle. * `host_key` - The public key from the remote host or the signing CA, used to verify the connection. +* `target_platform` - The target platform to connect to. Valid values are `windows` and `unix`. Defaults to `unix` if not set. 
+ + If the platform is set to `windows`, the default `script_path` is `c:\windows\temp\terraform_%RAND%.cmd`, assuming [the SSH default shell](https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_server_configuration#configuring-the-default-shell-for-openssh-in-windows) is `cmd.exe`. If the SSH default shell is PowerShell, set `script_path` to `"c:/windows/temp/terraform_%RAND%.ps1"` **Additional arguments only supported by the `winrm` connection type:** @@ -158,9 +161,9 @@ The `ssh` connection also supports the following fields to facilitate connnectio * `bastion_private_key` - The contents of an SSH key file to use for the bastion host. These can be loaded from a file on disk using - [the `file` function](/docs/configuration/functions/file.html). + [the `file` function](/docs/language/functions/file.html). Defaults to the value of the `private_key` field. * `bastion_certificate` - The contents of a signed CA Certificate. The certificate argument must be used in conjunction with a `bastion_private_key`. These can be loaded from - a file on disk using the [the `file` function](/docs/configuration/functions/file.html). \ No newline at end of file + a file on disk using the [the `file` function](/docs/language/functions/file.html). diff --git a/website/docs/provisioners/file.html.markdown b/website/docs/language/resources/provisioners/file.html.md similarity index 92% rename from website/docs/provisioners/file.html.markdown rename to website/docs/language/resources/provisioners/file.html.md index 5b3c80b0c..05b494e5b 100644 --- a/website/docs/provisioners/file.html.markdown +++ b/website/docs/language/resources/provisioners/file.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Provisioner: file" sidebar_current: "docs-provisioners-file" description: |- @@ -10,7 +10,7 @@ description: |- The `file` provisioner is used to copy files or directories from the machine executing Terraform to the newly created resource. 
The `file` provisioner -supports both `ssh` and `winrm` type [connections](/docs/provisioners/connection.html). +supports both `ssh` and `winrm` type [connections](/docs/language/resources/provisioners/connection.html). -> **Note:** Provisioners should only be used as a last resort. For most common situations there are better alternatives. For more information, see @@ -59,7 +59,7 @@ The following arguments are supported: * `content` - This is the content to copy on the destination. If destination is a file, the content will be written on that file, in case of a directory a file named `tf-file-content` is created. It's recommended to use a file as the destination. A - [`template_file`](/docs/providers/template/d/file.html) might be referenced in here, or + [`template_file`](https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/file) might be referenced in here, or any interpolation syntax. This attribute cannot be specified with `source`. * `destination` - (Required) This is the destination path. It must be specified as an diff --git a/website/docs/provisioners/habitat.html.markdown b/website/docs/language/resources/provisioners/habitat.html.md similarity index 95% rename from website/docs/provisioners/habitat.html.markdown rename to website/docs/language/resources/provisioners/habitat.html.md index 4c223bb2e..66af5a655 100644 --- a/website/docs/provisioners/habitat.html.markdown +++ b/website/docs/language/resources/provisioners/habitat.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Provisioner: habitat" sidebar_current: "docs-provisioners-habitat" description: |- @@ -10,10 +10,7 @@ description: |- The `habitat` provisioner installs the [Habitat](https://habitat.sh) supervisor and loads configured services. This provisioner only supports Linux targets using the `ssh` connection type at this time. 
-!> **Note:** This provisioner has been deprecated as of Terraform 0.13.4 and will be -removed in a future version of Terraform. For most common situations there are better -alternatives to using provisioners. For more information, see -[the main Provisioners page](./). +!> **Note:** This provisioner was removed in the 0.15.0 version of Terraform after being deprecated as of Terraform 0.13.4. For most common situations there are better alternatives to using provisioners. For more information, see [the main Provisioners page](./). ## Requirements diff --git a/website/docs/language/resources/provisioners/index.html.md b/website/docs/language/resources/provisioners/index.html.md new file mode 100644 index 000000000..c9fef64bb --- /dev/null +++ b/website/docs/language/resources/provisioners/index.html.md @@ -0,0 +1,11 @@ +--- +layout: "language" +page_title: "Provisioners Overview - Configuration Language" +--- + +# Provisioners + +Provisioners can be used to model specific actions on the local machine or on a +remote machine in order to prepare servers or other infrastructure objects for +service. + diff --git a/website/docs/provisioners/local-exec.html.markdown b/website/docs/language/resources/provisioners/local-exec.html.md similarity index 94% rename from website/docs/provisioners/local-exec.html.markdown rename to website/docs/language/resources/provisioners/local-exec.html.md index 2f8cf628f..f0d463ea7 100644 --- a/website/docs/provisioners/local-exec.html.markdown +++ b/website/docs/language/resources/provisioners/local-exec.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Provisioner: local-exec" sidebar_current: "docs-provisioners-local" description: |- @@ -11,7 +11,7 @@ description: |- The `local-exec` provisioner invokes a local executable after a resource is created. This invokes a process on the machine running Terraform, not on the resource. 
See the `remote-exec` -[provisioner](/docs/provisioners/remote-exec.html) to run commands on the +[provisioner](/docs/language/resources/provisioners/remote-exec.html) to run commands on the resource. Note that even though the resource will be fully created when the provisioner is @@ -29,7 +29,7 @@ resource "aws_instance" "web" { # ... provisioner "local-exec" { - command = "echo ${aws_instance.web.private_ip} >> private_ips.txt" + command = "echo ${self.private_ip} >> private_ips.txt" } } ``` diff --git a/website/docs/provisioners/null_resource.html.markdown b/website/docs/language/resources/provisioners/null_resource.html.md similarity index 85% rename from website/docs/provisioners/null_resource.html.markdown rename to website/docs/language/resources/provisioners/null_resource.html.md index f7609ab66..7fb94a4e8 100644 --- a/website/docs/provisioners/null_resource.html.markdown +++ b/website/docs/language/resources/provisioners/null_resource.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Provisioners Without a Resource" sidebar_current: "docs-provisioners-null-resource" description: |- @@ -9,15 +9,15 @@ description: |- # Provisioners Without a Resource -[null]: /docs/providers/null/resource.html +[null]: https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource If you need to run provisioners that aren't directly associated with a specific resource, you can associate them with a `null_resource`. Instances of [`null_resource`][null] are treated like normal resources, but they don't do anything. Like with any other resource, you can configure -[provisioners](/docs/provisioners/index.html) and [connection -details](/docs/provisioners/connection.html) on a `null_resource`. You can also +[provisioners](/docs/language/resources/provisioners/syntax.html) and [connection +details](/docs/language/resources/provisioners/connection.html) on a `null_resource`. 
You can also use its `triggers` argument and any meta-arguments to control exactly where in the dependency graph its provisioners will run. diff --git a/website/docs/provisioners/puppet.html.markdown b/website/docs/language/resources/provisioners/puppet.html.md similarity index 91% rename from website/docs/provisioners/puppet.html.markdown rename to website/docs/language/resources/provisioners/puppet.html.md index 70f754cf1..0e5005256 100644 --- a/website/docs/provisioners/puppet.html.markdown +++ b/website/docs/language/resources/provisioners/puppet.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Provisioner: puppet" sidebar_current: "docs-provisioners-puppet" description: |- @@ -10,12 +10,9 @@ description: |- The `puppet` provisioner installs, configures and runs the Puppet agent on a remote resource. The `puppet` provisioner supports both `ssh` and `winrm` type -[connections](/docs/provisioners/connection.html). +[connections](/docs/language/resources/provisioners/connection.html). -!> **Note:** This provisioner has been deprecated as of Terraform 0.13.4 and will be -removed in a future version of Terraform. For most common situations there are better -alternatives to using provisioners. For more information, see -[the main Provisioners page](./). +!> **Note:** This provisioner was removed in the 0.15.0 version of Terraform after being deprecated as of Terraform 0.13.4. For most common situations there are better alternatives to using provisioners. For more information, see [the main Provisioners page](./). 
## Requirements diff --git a/website/docs/provisioners/remote-exec.html.markdown b/website/docs/language/resources/provisioners/remote-exec.html.md similarity index 90% rename from website/docs/provisioners/remote-exec.html.markdown rename to website/docs/language/resources/provisioners/remote-exec.html.md index 3085bb335..7962cdd00 100644 --- a/website/docs/provisioners/remote-exec.html.markdown +++ b/website/docs/language/resources/provisioners/remote-exec.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Provisioner: remote-exec" sidebar_current: "docs-provisioners-remote" description: |- @@ -11,8 +11,8 @@ description: |- The `remote-exec` provisioner invokes a script on a remote resource after it is created. This can be used to run a configuration management tool, bootstrap into a cluster, etc. To invoke a local process, see the `local-exec` -[provisioner](/docs/provisioners/local-exec.html) instead. The `remote-exec` -provisioner supports both `ssh` and `winrm` type [connections](/docs/provisioners/connection.html). +[provisioner](/docs/language/resources/provisioners/local-exec.html) instead. The `remote-exec` +provisioner supports both `ssh` and `winrm` type [connections](/docs/language/resources/provisioners/connection.html). -> **Note:** Provisioners should only be used as a last resort. For most common situations there are better alternatives. For more information, see @@ -53,7 +53,7 @@ The following arguments are supported: You cannot pass any arguments to scripts using the `script` or `scripts` arguments to this provisioner. If you want to specify arguments, upload the script with the -[file provisioner](/docs/provisioners/file.html) +[file provisioner](/docs/language/resources/provisioners/file.html) and then use `inline` to call it. 
Example: ```hcl diff --git a/website/docs/provisioners/salt-masterless.html.md b/website/docs/language/resources/provisioners/salt-masterless.html.md similarity index 91% rename from website/docs/provisioners/salt-masterless.html.md rename to website/docs/language/resources/provisioners/salt-masterless.html.md index f15d10e67..6cdb4f326 100644 --- a/website/docs/provisioners/salt-masterless.html.md +++ b/website/docs/language/resources/provisioners/salt-masterless.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Provisioner: salt-masterless" sidebar_current: "docs-provisioners-salt-masterless" description: |- @@ -11,12 +11,9 @@ description: |- Type: `salt-masterless` The `salt-masterless` Terraform provisioner provisions machines built by Terraform -using [Salt](http://saltstack.com/) states, without connecting to a Salt master. The `salt-masterless` provisioner supports `ssh` [connections](/docs/provisioners/connection.html). +using [Salt](http://saltstack.com/) states, without connecting to a Salt master. The `salt-masterless` provisioner supports `ssh` [connections](/docs/language/resources/provisioners/connection.html). -!> **Note:** This provisioner has been deprecated as of Terraform 0.13.4 and will be -removed in a future version of Terraform. For most common situations there are better -alternatives to using provisioners. For more information, see -[the main Provisioners page](./). +!> **Note:** This provisioner was removed in the 0.15.0 version of Terraform after being deprecated as of Terraform 0.13.4. For most common situations there are better alternatives to using provisioners. For more information, see [the main Provisioners page](./). 
## Requirements diff --git a/website/docs/provisioners/index.html.markdown b/website/docs/language/resources/provisioners/syntax.html.md similarity index 83% rename from website/docs/provisioners/index.html.markdown rename to website/docs/language/resources/provisioners/syntax.html.md index d604b6f88..3a8eb95eb 100644 --- a/website/docs/provisioners/index.html.markdown +++ b/website/docs/language/resources/provisioners/syntax.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Provisioners" sidebar_current: "docs-provisioners" description: |- @@ -50,25 +50,25 @@ to pass data to instances at the time of their creation such that the data is immediately available on system boot. For example: * Alibaba Cloud: `user_data` on - [`alicloud_instance`](/docs/providers/alicloud/r/instance.html) - or [`alicloud_launch_template`](/docs/providers/alicloud/r/launch_template.html). + [`alicloud_instance`](https://registry.terraform.io/providers/aliyun/alicloud/latest/docs/resources/instance) + or [`alicloud_launch_template`](https://registry.terraform.io/providers/aliyun/alicloud/latest/docs/resources/launch_template). * Amazon EC2: `user_data` or `user_data_base64` on - [`aws_instance`](/docs/providers/aws/r/instance.html), - [`aws_launch_template`](/docs/providers/aws/r/launch_template.html), - and [`aws_launch_configuration`](/docs/providers/aws/r/launch_configuration.html). + [`aws_instance`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance), + [`aws_launch_template`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template), + and [`aws_launch_configuration`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_configuration). * Amazon Lightsail: `user_data` on - [`aws_lightsail_instance`](/docs/providers/aws/r/lightsail_instance.html). 
+ [`aws_lightsail_instance`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lightsail_instance). * Microsoft Azure: `custom_data` on - [`azurerm_virtual_machine`](/docs/providers/azurerm/r/virtual_machine.html) - or [`azurerm_virtual_machine_scale_set`](/docs/providers/azurerm/r/virtual_machine_scale_set.html). + [`azurerm_virtual_machine`](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/virtual_machine) + or [`azurerm_virtual_machine_scale_set`](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/virtual_machine_scale_set). * Google Cloud Platform: `metadata` on - [`google_compute_instance`](/docs/providers/google/r/compute_instance.html) - or [`google_compute_instance_group`](/docs/providers/google/r/compute_instance_group.html). + [`google_compute_instance`](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_instance) + or [`google_compute_instance_group`](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_instance_group). * Oracle Cloud Infrastructure: `metadata` or `extended_metadata` on - [`oci_core_instance`](/docs/providers/oci/r/core_instance.html) - or [`oci_core_instance_configuration`](/docs/providers/oci/r/core_instance_configuration.html). + [`oci_core_instance`](https://registry.terraform.io/providers/hashicorp/oci/latest/docs/resources/core_instance) + or [`oci_core_instance_configuration`](https://registry.terraform.io/providers/hashicorp/oci/latest/docs/resources/core_instance_configuration). * VMware vSphere: Attach a virtual CDROM to - [`vsphere_virtual_machine`](/docs/providers/vsphere/r/virtual_machine.html) + [`vsphere_virtual_machine`](https://registry.terraform.io/providers/hashicorp/vsphere/latest/docs/resources/virtual_machine) using the `cdrom` block, containing a file called `user-data.txt`. 
Many official Linux distribution disk images include software called @@ -166,8 +166,13 @@ You must include [a `connection` block](./connection.html) so that Terraform will know how to communicate with the server. Terraform includes several built-in provisioners; use the navigation sidebar to -view their documentation. You can also install third-party provisioners in -[the user plugins directory](../configuration/providers.html#third-party-plugins). +view their documentation. + +It's also possible to use third-party provisioners as plugins, by placing them +in `%APPDATA%\terraform.d\plugins`, `~/.terraform.d/plugins`, or the same +directory where the Terraform binary is installed. However, we do not recommend +using any provisioners except the built-in `file`, `local-exec`, and +`remote-exec` provisioners. All provisioners support the `when` and `on_failure` meta-arguments, which are described below (see [Destroy-Time Provisioners](#destroy-time-provisioners) @@ -188,7 +193,11 @@ block would create a dependency cycle. ## Suppressing Provisioner Logs in CLI Output -The configuration for a `provisioner` block may use sensitive values, such as [`sensitive` variables](../configuration/variables.html#suppressing-values-in-cli-output) or [`sensitive` output values](../outputs.html#sensitive-suppressing-values-in-cli-output). In this case, all log output from the provider is automatically suppressed to prevent the sensitive values from being displayed. +The configuration for a `provisioner` block may use sensitive values, such as +[`sensitive` variables](/docs/language/values/variables.html#suppressing-values-in-cli-output) or +[`sensitive` output values](/docs/language/values/outputs.html#sensitive-suppressing-values-in-cli-output). +In this case, all log output from the provisioner is automatically suppressed to +prevent the sensitive values from being displayed. 
## Creation-Time Provisioners diff --git a/website/docs/language/resources/syntax.html.md b/website/docs/language/resources/syntax.html.md new file mode 100644 index 000000000..65d52537e --- /dev/null +++ b/website/docs/language/resources/syntax.html.md @@ -0,0 +1,165 @@ +--- +layout: "language" +page_title: "Resources - Configuration Language" +sidebar_current: "docs-config-resources" +description: |- + Resources are the most important element in a Terraform configuration. + Each resource corresponds to an infrastructure object, such as a virtual + network or compute instance. +--- + +# Resource Blocks + +> **Hands-on:** Try the [Terraform: Get Started](https://learn.hashicorp.com/collections/terraform/aws-get-started?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn. + +_Resources_ are the most important element in the Terraform language. +Each resource block describes one or more infrastructure objects, such +as virtual networks, compute instances, or higher-level components such +as DNS records. + +## Resource Syntax + +Resource declarations can include a number of advanced features, but only +a small subset are required for initial use. More advanced syntax features, +such as single resource declarations that produce multiple similar remote +objects, are described later in this page. + +```hcl +resource "aws_instance" "web" { + ami = "ami-a1b2c3d4" + instance_type = "t2.micro" +} +``` + +A `resource` block declares a resource of a given type ("aws_instance") +with a given local name ("web"). The name is used to refer to this resource +from elsewhere in the same Terraform module, but has no significance outside +that module's scope. + +The resource type and name together serve as an identifier for a given +resource and so must be unique within a module. + +Within the block body (between `{` and `}`) are the configuration arguments +for the resource itself. 
Most arguments in this section depend on the +resource type, and indeed in this example both `ami` and `instance_type` are +arguments defined specifically for [the `aws_instance` resource type](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance). + +-> **Note:** Resource names must start with a letter or underscore, and may +contain only letters, digits, underscores, and dashes. + +## Resource Types + +Each resource is associated with a single _resource type_, which determines +the kind of infrastructure object it manages and what arguments and other +attributes the resource supports. + +### Providers + +Each resource type is implemented by a [provider](/docs/language/providers/requirements.html), +which is a plugin for Terraform that offers a collection of resource types. A +provider usually provides resources to manage a single cloud or on-premises +infrastructure platform. Providers are distributed separately from Terraform +itself, but Terraform can automatically install most providers when initializing +a working directory. + +In order to manage resources, a Terraform module must specify which providers it +requires. Additionally, most providers need some configuration in order to +access their remote APIs, and the root module must provide that configuration. + +For more information, see: + +- [Provider Requirements](/docs/language/providers/requirements.html), for declaring which + providers a module uses. +- [Provider Configuration](/docs/language/providers/configuration.html), for configuring provider settings. + +Terraform usually automatically determines which provider to use based on a +resource type's name. (By convention, resource type names start with their +provider's preferred local name.) When using multiple configurations of a +provider (or non-preferred local provider names), you must use the `provider` +meta-argument to manually choose an alternate provider configuration. 
See +[the `provider` meta-argument](/docs/language/meta-arguments/resource-provider.html) for more details. + +### Resource Arguments + +Most of the arguments within the body of a `resource` block are specific to the +selected resource type. The resource type's documentation lists which arguments +are available and how their values should be formatted. + +The values for resource arguments can make full use of +[expressions](/docs/language/expressions/index.html) and other dynamic Terraform +language features. + +There are also some _meta-arguments_ that are defined by Terraform itself +and apply across all resource types. (See [Meta-Arguments](#meta-arguments) below.) + +### Documentation for Resource Types + +Every Terraform provider has its own documentation, describing its resource +types and their arguments. + +Most publicly available providers are distributed on the +[Terraform Registry](https://registry.terraform.io/browse/providers), which also +hosts their documentation. When viewing a provider's page on the Terraform +Registry, you can click the "Documentation" link in the header to browse its +documentation. Provider documentation on the registry is versioned, and you can +use the dropdown version menu in the header to switch which version's +documentation you are viewing. + +To browse the publicly available providers and their documentation, see +[the providers section of the Terraform Registry](https://registry.terraform.io/browse/providers). + +-> **Note:** Provider documentation used to be hosted directly on terraform.io, +as part of Terraform's core documentation. Although some provider documentation +might still be hosted here, the Terraform Registry is now the main home for all +public provider docs. + +## Resource Behavior + +For more information about how Terraform manages resources when applying a +configuration, see +[Resource Behavior](/docs/language/resources/behavior.html). 
+ +## Meta-Arguments + +The Terraform language defines several meta-arguments, which can be used with +any resource type to change the behavior of resources. + +The following meta-arguments are documented on separate pages: + +- [`depends_on`, for specifying hidden dependencies](/docs/language/meta-arguments/depends_on.html) +- [`count`, for creating multiple resource instances according to a count](/docs/language/meta-arguments/count.html) +- [`for_each`, to create multiple instances according to a map, or set of strings](/docs/language/meta-arguments/for_each.html) +- [`provider`, for selecting a non-default provider configuration](/docs/language/meta-arguments/resource-provider.html) +- [`lifecycle`, for lifecycle customizations](/docs/language/meta-arguments/lifecycle.html) +- [`provisioner` and `connection`, for taking extra actions after resource creation](/docs/language/resources/provisioners/index.html) + +## Operation Timeouts + +Some resource types provide a special `timeouts` nested block argument that +allows you to customize how long certain operations are allowed to take +before being considered to have failed. +For example, [`aws_db_instance`](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/db_instance) +allows configurable timeouts for `create`, `update` and `delete` operations. + +Timeouts are handled entirely by the resource type implementation in the +provider, but resource types offering these features follow the convention +of defining a child block called `timeouts` that has a nested argument +named after each operation that has a configurable timeout value. +Each of these arguments takes a string representation of a duration, such +as `"60m"` for 60 minutes, `"10s"` for ten seconds, or `"2h"` for two hours. + +```hcl +resource "aws_db_instance" "example" { + # ... + + timeouts { + create = "60m" + delete = "2h" + } +} +``` + +The set of configurable operations is chosen by each resource type. 
Most +resource types do not support the `timeouts` block at all. Consult the +documentation for each resource type to see which operations it offers +for configuration, if any. diff --git a/website/docs/backends/types/artifactory.html.md b/website/docs/language/settings/backends/artifactory.html.md similarity index 98% rename from website/docs/backends/types/artifactory.html.md rename to website/docs/language/settings/backends/artifactory.html.md index ce30ae716..2e19dea89 100644 --- a/website/docs/backends/types/artifactory.html.md +++ b/website/docs/language/settings/backends/artifactory.html.md @@ -1,5 +1,5 @@ --- -layout: "backend-types" +layout: "language" page_title: "Backend Type: artifactory" sidebar_current: "docs-backends-types-standard-artifactory" description: |- diff --git a/website/docs/backends/types/azurerm.html.md b/website/docs/language/settings/backends/azurerm.html.md similarity index 94% rename from website/docs/backends/types/azurerm.html.md rename to website/docs/language/settings/backends/azurerm.html.md index 72da9c098..0286a8b09 100644 --- a/website/docs/backends/types/azurerm.html.md +++ b/website/docs/language/settings/backends/azurerm.html.md @@ -1,5 +1,5 @@ --- -layout: "backend-types" +layout: "language" page_title: "Backend Type: azurerm" sidebar_current: "docs-backends-types-standard-azurerm" description: |- @@ -75,11 +75,11 @@ terraform { } ``` --> **NOTE:** When using a Service Principal or an Access Key - we recommend using a [Partial Configuration](/docs/backends/config.html) for the credentials. +-> **NOTE:** When using a Service Principal or an Access Key - we recommend using a [Partial Configuration](/docs/language/settings/backends/configuration.html#partial-configuration) for the credentials. 
## Data Source Configuration -When authenticating using a Service Principall (either with a Client Certificate or a Client Secret): +When authenticating using a Service Principal (either with a Client Certificate or a Client Secret): ```hcl data "terraform_remote_state" "foo" { @@ -146,9 +146,9 @@ data "terraform_remote_state" "foo" { The following configuration options are supported: -* `storage_account_name` - (Required) The Name of [the Storage Account](https://www.terraform.io/docs/providers/azurerm/r/storage_account.html). +* `storage_account_name` - (Required) The Name of [the Storage Account](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/storage_account). -* `container_name` - (Required) The Name of [the Storage Container](https://www.terraform.io/docs/providers/azurerm/r/storage_container.html) within the Storage Account. +* `container_name` - (Required) The Name of [the Storage Container](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/storage_container) within the Storage Account. * `key` - (Required) The name of the Blob used to retrieve/store Terraform's State file inside the Storage Container. diff --git a/website/docs/configuration/backend.html.md b/website/docs/language/settings/backends/configuration.html.md similarity index 90% rename from website/docs/configuration/backend.html.md rename to website/docs/language/settings/backends/configuration.html.md index 35e6b0094..23bd91b0c 100644 --- a/website/docs/configuration/backend.html.md +++ b/website/docs/language/settings/backends/configuration.html.md @@ -1,17 +1,13 @@ --- -layout: "docs" +layout: "language" page_title: "Backend Configuration - Configuration Language" --- # Backend Configuration --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Terraform Settings](../configuration-0-11/terraform.html). 
- Each Terraform configuration can specify a backend, which defines exactly where -and how operations are performed, where [state](/docs/state/index.html) +and how operations are performed, where [state](/docs/language/state/index.html) snapshots are stored, etc. Most non-trivial Terraform configurations configure a remote backend so that multiple people can work with the same infrastructure. @@ -45,7 +41,7 @@ The arguments used in the block's body are specific to the chosen backend type; Some backends allow providing access credentials directly as part of the configuration for use in unusual situations, for pragmatic reasons. However, in normal use we _do not_ recommend including access credentials as part of the backend configuration. Instead, leave those arguments completely unset and provide credentials via the credentials files or environment variables that are conventional for the target system, as described in the documentation for each backend. -See _[Backend Types](/docs/backends/types/index.html)_ for details about each supported backend type and its configuration arguments. +See the list of backend types in the navigation sidebar for details about each supported backend type and its configuration arguments. ### Default Backend @@ -75,7 +71,7 @@ the arguments are omitted, we call this a _partial configuration_. With a partial configuration, the remaining configuration arguments must be provided as part of -[the initialization process](/docs/backends/init.html#backend-initialization). +[the initialization process](/docs/cli/init/index.html). There are several ways to supply the remaining arguments: * **File**: A configuration file may be specified via the `init` command line. @@ -149,12 +145,12 @@ both the configuration itself as well as the type of backend (for example from "consul" to "s3"). Terraform will automatically detect any changes in your configuration -and request a [reinitialization](/docs/backends/init.html). 
As part of +and request a [reinitialization](/docs/cli/init/index.html). As part of the reinitialization process, Terraform will ask if you'd like to migrate your existing state to the new configuration. This allows you to easily switch from one backend to another. -If you're using multiple [workspaces](/docs/state/workspaces.html), +If you're using multiple [workspaces](/docs/language/state/workspaces.html), Terraform can copy all workspaces to the destination. If Terraform detects you have multiple workspaces, it will ask if this is what you want to do. @@ -165,7 +161,7 @@ want to migrate your state. You can respond "no" in this scenario. If you no longer want to use any backend, you can simply remove the configuration from the file. Terraform will detect this like any other -change and prompt you to [reinitialize](/docs/backends/init.html). +change and prompt you to [reinitialize](/docs/cli/init/index.html). As part of the reinitialization, Terraform will ask if you'd like to migrate your state back down to normal local state. Once this is complete then diff --git a/website/docs/backends/types/consul.html.md b/website/docs/language/settings/backends/consul.html.md similarity index 92% rename from website/docs/backends/types/consul.html.md rename to website/docs/language/settings/backends/consul.html.md index 740d97502..d2f921454 100644 --- a/website/docs/backends/types/consul.html.md +++ b/website/docs/language/settings/backends/consul.html.md @@ -1,5 +1,5 @@ --- -layout: "backend-types" +layout: "language" page_title: "Backend Type: consul" sidebar_current: "docs-backends-types-standard-consul" description: |- @@ -12,7 +12,7 @@ description: |- Stores the state in the [Consul](https://www.consul.io/) KV store at a given path. -This backend supports [state locking](/docs/state/locking.html). +This backend supports [state locking](/docs/language/state/locking.html). 
## Example Configuration @@ -27,7 +27,7 @@ terraform { ``` Note that for the access credentials we recommend using a -[partial configuration](/docs/backends/config.html). +[partial configuration](/docs/language/settings/backends/configuration.html#partial-configuration). ## Data Source Configuration diff --git a/website/docs/backends/types/cos.html.md b/website/docs/language/settings/backends/cos.html.md similarity index 85% rename from website/docs/backends/types/cos.html.md rename to website/docs/language/settings/backends/cos.html.md index bf1d09945..cef18670c 100644 --- a/website/docs/backends/types/cos.html.md +++ b/website/docs/language/settings/backends/cos.html.md @@ -1,5 +1,5 @@ --- -layout: "backend-types" +layout: "language" page_title: "Backend Type: cos" sidebar_current: "docs-backends-types-standard-cos" description: |- @@ -11,7 +11,7 @@ description: |- **Kind: Standard (with locking)** Stores the state as an object in a configurable prefix in a given bucket on [Tencent Cloud Object Storage](https://intl.cloud.tencent.com/product/cos) (COS). -This backend also supports [state locking](/docs/state/locking.html). +This backend also supports [state locking](/docs/language/state/locking.html). ~> **Warning!** It is highly recommended that you enable [Object Versioning](https://intl.cloud.tencent.com/document/product/436/19883) on the COS bucket to allow for state recovery in the case of accidental deletions and human error. @@ -28,12 +28,12 @@ terraform { } ``` -This assumes we have a [COS Bucket](https://www.terraform.io/docs/providers/tencentcloud/r/cos_bucket.html) created named `bucket-for-terraform-state-1258798060`, +This assumes we have a [COS Bucket](https://registry.terraform.io/providers/tencentcloudstack/tencentcloud/latest/docs/resources/cos_bucket) created named `bucket-for-terraform-state-1258798060`, Terraform state will be written into the file `terraform/state/terraform.tfstate`. 
## Data Source Configuration -To make use of the COS remote state in another configuration, use the [`terraform_remote_state` data source](/docs/providers/terraform/d/remote_state.html). +To make use of the COS remote state in another configuration, use the [`terraform_remote_state` data source](/docs/language/state/remote-state-data.html). ```hcl data "terraform_remote_state" "foo" { diff --git a/website/docs/backends/types/etcd.html.md b/website/docs/language/settings/backends/etcd.html.md similarity index 97% rename from website/docs/backends/types/etcd.html.md rename to website/docs/language/settings/backends/etcd.html.md index 302d5486b..792b7354c 100644 --- a/website/docs/backends/types/etcd.html.md +++ b/website/docs/language/settings/backends/etcd.html.md @@ -1,5 +1,5 @@ --- -layout: "backend-types" +layout: "language" page_title: "Backend Type: etcd" sidebar_current: "docs-backends-types-standard-etcdv2" description: |- diff --git a/website/docs/backends/types/etcdv3.html.md b/website/docs/language/settings/backends/etcdv3.html.md similarity index 89% rename from website/docs/backends/types/etcdv3.html.md rename to website/docs/language/settings/backends/etcdv3.html.md index 43257c8ce..7c31d5125 100644 --- a/website/docs/backends/types/etcdv3.html.md +++ b/website/docs/language/settings/backends/etcdv3.html.md @@ -1,5 +1,5 @@ --- -layout: "backend-types" +layout: "language" page_title: "Backend Type: etcdv3" sidebar_current: "docs-backends-types-standard-etcdv3" description: |- @@ -12,7 +12,7 @@ description: |- Stores the state in the [etcd](https://coreos.com/etcd/) KV store with a given prefix. -This backend supports [state locking](/docs/state/locking.html). +This backend supports [state locking](/docs/language/state/locking.html). ## Example Configuration @@ -27,7 +27,7 @@ terraform { ``` Note that for the access credentials we recommend using a -[partial configuration](/docs/backends/config.html). 
+[partial configuration](/docs/language/settings/backends/configuration.html#partial-configuration).

## Data Source Configuration

diff --git a/website/docs/language/settings/backends/gcs.html.md b/website/docs/language/settings/backends/gcs.html.md
new file mode 100644
index 000000000..1d869579e
--- /dev/null
+++ b/website/docs/language/settings/backends/gcs.html.md
@@ -0,0 +1,107 @@
+---
+layout: "language"
+page_title: "Backend Type: gcs"
+sidebar_current: "docs-backends-types-standard-gcs"
+description: |-
+  Terraform can store the state remotely, making it easier to version and work with in a team.
+---
+
+# gcs
+
+**Kind: Standard (with locking)**
+
+Stores the state as an object in a configurable prefix in a pre-existing bucket on [Google Cloud Storage](https://cloud.google.com/storage/) (GCS).
+This backend also supports [state locking](/docs/language/state/locking.html). The bucket must exist prior to configuring the backend.
+
+~> **Warning!** It is highly recommended that you enable
+[Object Versioning](https://cloud.google.com/storage/docs/object-versioning)
+on the GCS bucket to allow for state recovery in the case of accidental deletions and human error.
+
+## Example Configuration
+
+```hcl
+terraform {
+  backend "gcs" {
+    bucket = "tf-state-prod"
+    prefix = "terraform/state"
+  }
+}
+```
+
+## Data Source Configuration
+
+```hcl
+data "terraform_remote_state" "foo" {
+  backend = "gcs"
+  config = {
+    bucket = "terraform-state"
+    prefix = "prod"
+  }
+}
+
+resource "template_file" "bar" {
+  template = "${greeting}"
+
+  vars {
+    greeting = "${data.terraform_remote_state.foo.greeting}"
+  }
+}
+```
+
+## Authentication
+
+IAM Changes to buckets are [eventually consistent](https://cloud.google.com/storage/docs/consistency#eventually_consistent_operations) and may take up to a few minutes to take effect. Terraform will return 403 errors until it is eventually consistent.
+
+### Running Terraform on your workstation.
+ +If you are using Terraform on your workstation, you will need to install the Google Cloud SDK and authenticate using [User Application Default +Credentials](https://cloud.google.com/sdk/gcloud/reference/auth/application-default). + +User ADCs do [expire](https://developers.google.com/identity/protocols/oauth2#expiration) and you can refresh them by running `gcloud auth application-default login`. + +### Running Terraform on Google Cloud + +If you are running Terraform on Google Cloud, you can configure that instance or cluster to use a [Google Service +Account](https://cloud.google.com/compute/docs/authentication). This will allow Terraform to authenticate to Google Cloud without having to bake in a separate +credential/authentication file. Make sure that the scope of the VM/Cluster is set to cloud-platform. + +### Running Terraform outside of Google Cloud + +If you are running Terraform outside of Google Cloud, generate a service account key and set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to +the path of the service account key. Terraform will use that key for authentication. + +### Impersonating Service Accounts + +Terraform can impersonate a Google Service Account as described [here](https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials). A valid credential must be provided as mentioned in the earlier section and that identity must have the `roles/iam.serviceAccountTokenCreator` role on the service account you are impersonating. + +## Configuration variables + +The following configuration options are supported: + + * `bucket` - (Required) The name of the GCS bucket. This name must be + globally unique. For more information, see [Bucket Naming + Guidelines](https://cloud.google.com/storage/docs/bucketnaming.html#requirements). + * `credentials` / `GOOGLE_BACKEND_CREDENTIALS` / `GOOGLE_CREDENTIALS` - + (Optional) Local path to Google Cloud Platform account credentials in JSON + format. 
If unset, [Google Application Default + Credentials](https://developers.google.com/identity/protocols/application-default-credentials) + are used. The provided credentials must have Storage Object Admin role on the bucket. + **Warning**: if using the Google Cloud Platform provider as well, it will + also pick up the `GOOGLE_CREDENTIALS` environment variable. + * `impersonate_service_account` - (Optional) The service account to impersonate for accessing the State Bucket. + You must have `roles/iam.serviceAccountTokenCreator` role on that account for the impersonation to succeed. + If you are using a delegation chain, you can specify that using the `impersonate_service_account_delegates` field. + Alternatively, this can be specified using the `GOOGLE_IMPERSONATE_SERVICE_ACCOUNT` environment + variable. + * `impersonate_service_account_delegates` - (Optional) The delegation chain for impersonating a service account as described [here](https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials#sa-credentials-delegated). + * `access_token` - (Optional) A temporary [OAuth 2.0 access token] obtained + from the Google Authorization server, i.e. the `Authorization: Bearer` token + used to authenticate HTTP requests to GCP APIs. This is an alternative to + `credentials`. If both are specified, `access_token` will be used over the + `credentials` field. + * `prefix` - (Optional) GCS prefix inside the bucket. Named states for + workspaces are stored in an object called `<prefix>/<name>.tfstate`. + * `encryption_key` / `GOOGLE_ENCRYPTION_KEY` - (Optional) A 32 byte base64 + encoded 'customer supplied encryption key' used to encrypt all state. For + more information see [Customer Supplied Encryption + Keys](https://cloud.google.com/storage/docs/encryption#customer-supplied). 
diff --git a/website/docs/backends/types/http.html.md b/website/docs/language/settings/backends/http.html.md similarity index 99% rename from website/docs/backends/types/http.html.md rename to website/docs/language/settings/backends/http.html.md index e0f3e7488..e87903c44 100644 --- a/website/docs/backends/types/http.html.md +++ b/website/docs/language/settings/backends/http.html.md @@ -1,5 +1,5 @@ --- -layout: "backend-types" +layout: "language" page_title: "Backend Type: http" sidebar_current: "docs-backends-types-standard-http" description: |- diff --git a/website/docs/language/settings/backends/index.html.md b/website/docs/language/settings/backends/index.html.md new file mode 100644 index 000000000..6e477c5ef --- /dev/null +++ b/website/docs/language/settings/backends/index.html.md @@ -0,0 +1,110 @@ +--- +layout: "language" +page_title: "Backend Overview - Configuration Language" +--- + +# Backends + +Each Terraform configuration can specify a backend, which defines where +and how operations are performed, where [state](/docs/language/state/index.html) +snapshots are stored, etc. + +The rest of this page introduces the concept of backends; the other pages in +this section document how to configure and use backends. + +- [Backend Configuration](/docs/language/settings/backends/configuration.html) documents the form + of a `backend` block, which selects and configures a backend for a + Terraform configuration. +- This section also includes a page for each of Terraform's built-in backends, + documenting its behavior and available settings. See the navigation sidebar + for a complete list. + +## Recommended Backends + +- If you are still learning how to use Terraform, we recommend using the default + `local` backend, which requires no configuration. 
+- If you and your team are using Terraform to manage meaningful infrastructure, + we recommend using the `remote` backend with [Terraform Cloud](/docs/cloud/index.html) + or [Terraform Enterprise](/docs/enterprise/index.html). + +## Where Backends are Used + +Backend configuration is only used by [Terraform CLI](/docs/cli/index.html). +Terraform Cloud and Terraform Enterprise always use their own state storage when +performing Terraform runs, so they ignore any backend block in the +configuration. + +But since it's common to +[use Terraform CLI alongside Terraform Cloud](/docs/cloud/run/cli.html) +(and since certain state operations, like [tainting](/docs/cli/commands/taint.html), +can only be performed on the CLI), we recommend that Terraform Cloud users +include a backend block in their configurations and configure the `remote` +backend to use the relevant Terraform Cloud workspace(s). + +## Where Backends Come From + +Terraform includes a built-in selection of backends; this selection has changed +over time, but does not change very often. + +The built-in backends are the only backends. You cannot load additional backends +as plugins. + +## What Backends Do + +There are two areas of Terraform's behavior that are determined by the backend: + +- Where state is stored. +- Where operations are performed. + +### State + +Terraform uses persistent [state](/docs/language/state/index.html) data to keep track of +the resources it manages. Since it needs the state in order to know which +real-world infrastructure objects correspond to the resources in a +configuration, everyone working with a given collection of infrastructure +resources must be able to access the same state data. + +The `local` backend stores state as a local file on disk, but every other +backend stores state in a remote service of some kind, which allows multiple +people to access it. 
Accessing state in a remote service generally requires some +kind of access credentials, since state data contains extremely sensitive +information. + +Some backends act like plain "remote disks" for state files; others support +_locking_ the state while operations are being performed, which helps prevent +conflicts and inconsistencies. + +### Operations + +"Operations" refers to performing API requests against infrastructure services +in order to create, read, update, or destroy resources. Not every `terraform` +subcommand performs API operations; many of them only operate on state data. + +Only two backends actually perform operations: `local` and `remote`. + +The `local` backend performs API operations directly from the machine where the +`terraform` command is run. Whenever you use a backend other than `local` or +`remote`, Terraform uses the `local` backend for operations; it only uses the +configured backend for state storage. + +The `remote` backend can perform API operations remotely, using Terraform Cloud +or Terraform Enterprise. When running remote operations, the local `terraform` +command displays the output of the remote actions as though they were being +performed locally, but only the remote system requires cloud credentials or +network access to the resources being managed. + +Remote operations are optional for the `remote` backend; the settings for the +target Terraform Cloud workspace determine whether operations run remotely or +locally. If local operations are configured, Terraform uses the `remote` backend +for state and the `local` backend for operations, like with the other state +backends. + +### Backend Types + +Terraform's backends are divided into two main types, according to how they +handle state and operations: + +- **Enhanced** backends can both store state and perform operations. There are + only two enhanced backends: `local` and `remote`. 
+- **Standard** backends only store state, and rely on the `local` backend for + performing operations. diff --git a/website/docs/backends/types/kubernetes.html.md b/website/docs/language/settings/backends/kubernetes.html.md similarity index 97% rename from website/docs/backends/types/kubernetes.html.md rename to website/docs/language/settings/backends/kubernetes.html.md index 80ef08763..715db82c5 100644 --- a/website/docs/backends/types/kubernetes.html.md +++ b/website/docs/language/settings/backends/kubernetes.html.md @@ -1,5 +1,5 @@ --- -layout: "backend-types" +layout: "language" page_title: "Backend Type: Kubernetes" sidebar_current: "docs-backends-types-standard-kubernetes" description: |- @@ -33,7 +33,7 @@ If the `in_cluster_config` flag is set the backend will attempt to use a [servic For most use cases either `in_cluster_config` or `load_config_file` will need to be set to `true`. If both flags are set the configuration from `load_config_file` will be used. -Note that for the access credentials we recommend using a [partial configuration](/docs/backends/config.html#partial-configuration). +Note that for the access credentials we recommend using a [partial configuration](/docs/language/settings/backends/configuration.html#partial-configuration). 
## Example Referencing diff --git a/website/docs/backends/types/local.html.md b/website/docs/language/settings/backends/local.html.md similarity index 97% rename from website/docs/backends/types/local.html.md rename to website/docs/language/settings/backends/local.html.md index b8a690cb6..225e6ac27 100644 --- a/website/docs/backends/types/local.html.md +++ b/website/docs/language/settings/backends/local.html.md @@ -1,5 +1,5 @@ --- -layout: "backend-types" +layout: "language" page_title: "Backend Type: local" sidebar_current: "docs-backends-types-enhanced-local" description: |- diff --git a/website/docs/backends/types/manta.html.md b/website/docs/language/settings/backends/manta.html.md similarity index 95% rename from website/docs/backends/types/manta.html.md rename to website/docs/language/settings/backends/manta.html.md index 926891618..ef82e5602 100644 --- a/website/docs/backends/types/manta.html.md +++ b/website/docs/language/settings/backends/manta.html.md @@ -1,5 +1,5 @@ --- -layout: "backend-types" +layout: "language" page_title: "Backend Type: manta" sidebar_current: "docs-backends-types-standard-manta" description: |- @@ -24,7 +24,7 @@ terraform { ``` Note that for the access credentials we recommend using a -[partial configuration](/docs/backends/config.html). +[partial configuration](/docs/language/settings/backends/configuration.html#partial-configuration). 
## Data Source Configuration diff --git a/website/docs/backends/types/oss.html.md b/website/docs/language/settings/backends/oss.html.md similarity index 91% rename from website/docs/backends/types/oss.html.md rename to website/docs/language/settings/backends/oss.html.md index c15ebd064..6acc16af1 100644 --- a/website/docs/backends/types/oss.html.md +++ b/website/docs/language/settings/backends/oss.html.md @@ -1,5 +1,5 @@ --- -layout: "backend-types" +layout: "language" page_title: "Backend Type: oss" sidebar_current: "docs-backends-types-standard-oss" description: |- @@ -33,9 +33,9 @@ terraform { } ``` -This assumes we have a [OSS Bucket](https://www.terraform.io/docs/providers/alicloud/r/oss_bucket.html) created called `bucket-for-terraform-state`, -a [OTS Instance](https://www.terraform.io/docs/providers/alicloud/r/ots_instance.html) called `terraform-remote` and -a [OTS TableStore](https://www.terraform.io/docs/providers/alicloud/r/ots_table.html) called `statelock`. The +This assumes we have a [OSS Bucket](https://registry.terraform.io/providers/aliyun/alicloud/latest/docs/resources/oss_bucket) created called `bucket-for-terraform-state`, +a [OTS Instance](https://registry.terraform.io/providers/aliyun/alicloud/latest/docs/resources/ots_instance) called `terraform-remote` and +a [OTS TableStore](https://registry.terraform.io/providers/aliyun/alicloud/latest/docs/resources/ots_table) called `statelock`. The Terraform state will be written into the file `path/mystate/version-1.tfstate`. The `TableStore` must have a primary key named `LockID` of type `String`. @@ -43,7 +43,7 @@ Terraform state will be written into the file `path/mystate/version-1.tfstate`. To make use of the OSS remote state in another configuration, use the [`terraform_remote_state` data -source](/docs/providers/terraform/d/remote_state.html). +source](/docs/language/state/remote-state-data.html). 
```hcl terraform { diff --git a/website/docs/backends/types/pg.html.md b/website/docs/language/settings/backends/pg.html.md similarity index 76% rename from website/docs/backends/types/pg.html.md rename to website/docs/language/settings/backends/pg.html.md index 7dd60e582..362443cfb 100644 --- a/website/docs/backends/types/pg.html.md +++ b/website/docs/language/settings/backends/pg.html.md @@ -1,5 +1,5 @@ --- -layout: "backend-types" +layout: "language" page_title: "Backend Type: pg" sidebar_current: "docs-backends-types-standard-pg" description: |- @@ -12,7 +12,7 @@ description: |- Stores the state in a [Postgres database](https://www.postgresql.org) version 9.5 or newer. -This backend supports [state locking](/docs/state/locking.html). +This backend supports [state locking](/docs/language/state/locking.html). ## Example Configuration @@ -32,7 +32,9 @@ createdb terraform_backend This `createdb` command is found in [Postgres client applications](https://www.postgresql.org/docs/9.5/reference-client.html) which are installed along with the database server. -We recommend using a [partial configuration](/docs/backends/config.html#partial-configuration) for the `conn_str` variable, because it typically contains access credentials that should not be committed to source control: +We recommend using a +[partial configuration](/docs/language/settings/backends/configuration.html#partial-configuration) +for the `conn_str` variable, because it typically contains access credentials that should not be committed to source control: ```hcl terraform { @@ -54,7 +56,7 @@ terraform init -backend-config="conn_str=postgres://localhost/terraform_backend? ## Data Source Configuration -To make use of the pg remote state in another configuration, use the [`terraform_remote_state` data source](/docs/providers/terraform/d/remote_state.html). 
+To make use of the pg remote state in another configuration, use the [`terraform_remote_state` data source](/docs/language/state/remote-state-data.html). ```hcl data "terraform_remote_state" "network" { @@ -81,9 +83,9 @@ Postgres version 9.5 or newer is required to support advisory locks and the "ON This backend creates one table **states** in the automatically-managed Postgres schema configured by the `schema_name` variable. -The table is keyed by the [workspace](/docs/state/workspaces.html) name. If workspaces are not in use, the name `default` is used. +The table is keyed by the [workspace](/docs/language/state/workspaces.html) name. If workspaces are not in use, the name `default` is used. -Locking is supported using [Postgres advisory locks](https://www.postgresql.org/docs/9.5/explicit-locking.html#ADVISORY-LOCKS). [`force-unlock`](https://www.terraform.io/docs/commands/force-unlock.html) is not supported, because these database-native locks will automatically unlock when the session is aborted or the connection fails. To see outstanding locks in a Postgres server, use the [`pg_locks` system view](https://www.postgresql.org/docs/9.5/view-pg-locks.html). +Locking is supported using [Postgres advisory locks](https://www.postgresql.org/docs/9.5/explicit-locking.html#ADVISORY-LOCKS). [`force-unlock`](https://www.terraform.io/docs/cli/commands/force-unlock.html) is not supported, because these database-native locks will automatically unlock when the session is aborted or the connection fails. To see outstanding locks in a Postgres server, use the [`pg_locks` system view](https://www.postgresql.org/docs/9.5/view-pg-locks.html). 
The **states** table contains: diff --git a/website/docs/backends/types/remote.html.md b/website/docs/language/settings/backends/remote.html.md similarity index 92% rename from website/docs/backends/types/remote.html.md rename to website/docs/language/settings/backends/remote.html.md index 1af0313cd..83350eae9 100644 --- a/website/docs/backends/types/remote.html.md +++ b/website/docs/language/settings/backends/remote.html.md @@ -1,5 +1,5 @@ --- -layout: "backend-types" +layout: "language" page_title: "Backend Type: remote" sidebar_current: "docs-backends-types-enhanced-remote" description: |- @@ -28,7 +28,7 @@ Currently the remote backend supports the following Terraform commands: - `apply` - `console` (supported in Terraform >= v0.11.12) -- `destroy` (requires manually setting `CONFIRM_DESTROY=1` on the workspace) +- `destroy` - `fmt` - `get` - `graph` (supported in Terraform >= v0.11.12) @@ -59,7 +59,7 @@ determines which mode it uses: all of the desired remote workspace names. For example, set `prefix = "networking-"` to use Terraform cloud workspaces with names like `networking-dev` and `networking-prod`. This is helpful when - mapping multiple Terraform CLI [workspaces](../../state/workspaces.html) + mapping multiple Terraform CLI [workspaces](/docs/language/state/workspaces.html) used in a single Terraform configuration to multiple Terraform Cloud workspaces. @@ -70,7 +70,7 @@ the Terraform CLI workspace `prod` within the current configuration. Remote Terraform operations such as `plan` and `apply` executed against that Terraform CLI workspace will be executed in the Terraform Cloud workspace `networking-prod`. 
-Additionally, the [`${terraform.workspace}`](../../state/workspaces.html#current-workspace-interpolation) +Additionally, the [`${terraform.workspace}`](/docs/language/state/workspaces.html#current-workspace-interpolation) interpolation sequence should be removed from Terraform configurations that run remote operations against Terraform Cloud workspaces. The reason for this is that each Terraform Cloud workspace currently only uses the single `default` Terraform @@ -94,8 +94,8 @@ running any remote operations against them. ## Example Configurations -> **Note:** We recommend omitting the token from the configuration, and instead using - [`terraform login`](/docs/commands/login.html) or manually configuring - `credentials` in the [CLI config file](/docs/commands/cli-config.html#credentials). + [`terraform login`](/docs/cli/commands/login.html) or manually configuring + `credentials` in the [CLI config file](/docs/cli/config/config-file.html#credentials). ### Basic Configuration @@ -177,9 +177,9 @@ The following configuration options are supported: targeted workspace(s). * `token` - (Optional) The token used to authenticate with the remote backend. We recommend omitting the token from the configuration, and instead using - [`terraform login`](/docs/commands/login.html) or manually configuring + [`terraform login`](/docs/cli/commands/login.html) or manually configuring `credentials` in the - [CLI config file](/docs/commands/cli-config.html#credentials). + [CLI config file](/docs/cli/config/config-file.html#credentials). * `workspaces` - (Required) A block specifying which remote workspace(s) to use. 
The `workspaces` block supports the following keys: diff --git a/website/docs/backends/types/s3.html.md b/website/docs/language/settings/backends/s3.html.md similarity index 97% rename from website/docs/backends/types/s3.html.md rename to website/docs/language/settings/backends/s3.html.md index 31b3a7ac9..678cb24b7 100644 --- a/website/docs/backends/types/s3.html.md +++ b/website/docs/language/settings/backends/s3.html.md @@ -1,5 +1,5 @@ --- -layout: "backend-types" +layout: "language" page_title: "Backend Type: s3" sidebar_current: "docs-backends-types-standard-s3" description: |- @@ -37,7 +37,7 @@ This assumes we have a bucket created called `mybucket`. The Terraform state is written to the key `path/to/my/key`. Note that for the access credentials we recommend using a -[partial configuration](/docs/backends/config.html). +[partial configuration](/docs/language/settings/backends/configuration.html#partial-configuration). ### S3 Bucket Permissions @@ -107,7 +107,7 @@ This is seen in the following AWS IAM Statement: To make use of the S3 remote state in another configuration, use the [`terraform_remote_state` data -source](/docs/providers/terraform/d/remote_state.html). +source](/docs/language/state/remote-state-data.html). ```hcl data "terraform_remote_state" "network" { @@ -182,7 +182,7 @@ The following configuration is optional: The following configuration is required: * `bucket` - (Required) Name of the S3 Bucket. -* `key` - (Required) Path to the state file inside the S3 Bucket. When using a non-default [workspace](/docs/state/workspaces.html), the state path will be `/workspace_key_prefix/workspace_name/key` (see also the `workspace_key_prefix` configuration). +* `key` - (Required) Path to the state file inside the S3 Bucket. When using a non-default [workspace](/docs/language/state/workspaces.html), the state path will be `/workspace_key_prefix/workspace_name/key` (see also the `workspace_key_prefix` configuration). 
The following configuration is optional: @@ -214,7 +214,7 @@ The S3 backend can be used in a number of different ways that make different tradeoffs between convenience, security, and isolation in such an organization. This section describes one such approach that aims to find a good compromise between these tradeoffs, allowing use of -[Terraform's workspaces feature](/docs/state/workspaces.html) to switch +[Terraform's workspaces feature](/docs/language/state/workspaces.html) to switch conveniently between multiple isolated deployments of the same configuration. Use this section as a starting-point for your approach, but note that @@ -320,7 +320,7 @@ provider "aws" { If workspace IAM roles are centrally managed and shared across many separate Terraform configurations, the role ARNs could also be obtained via a data -source such as [`terraform_remote_state`](/docs/providers/terraform/d/remote_state.html) +source such as [`terraform_remote_state`](/docs/language/state/remote-state-data.html) to avoid repeating these values. ### Creating and Selecting Workspaces diff --git a/website/docs/backends/types/swift.html.md b/website/docs/language/settings/backends/swift.html.md similarity index 96% rename from website/docs/backends/types/swift.html.md rename to website/docs/language/settings/backends/swift.html.md index 57ab752eb..1d8241ae4 100644 --- a/website/docs/backends/types/swift.html.md +++ b/website/docs/language/settings/backends/swift.html.md @@ -1,5 +1,5 @@ --- -layout: "backend-types" +layout: "language" page_title: "Backend Type: swift" sidebar_current: "docs-backends-types-standard-swift" description: |- @@ -12,7 +12,7 @@ description: |- Stores the state as an artifact in [Swift](http://docs.openstack.org/developer/swift/latest/). -~> Warning! 
It is highly recommended that you enable [Object Versioning](https://docs.openstack.org/developer/swift/latest/overview_object_versioning.html) by setting the [`archive_container`](https://www.terraform.io/docs/backends/types/swift.html#archive_container) configuration. This allows for state recovery in the case of accidental deletions and human error. +~> Warning! It is highly recommended that you enable [Object Versioning](https://docs.openstack.org/developer/swift/latest/overview_object_versioning.html) by setting the [`archive_container`](https://www.terraform.io/docs/language/settings/backends/swift.html#archive_container) configuration. This allows for state recovery in the case of accidental deletions and human error. ## Example Configuration @@ -27,7 +27,7 @@ terraform { This will create a container called `terraform-state` and an object within that container called `tfstate.tf`. It will enable versioning using the `terraform-state-archive` container to contain the older version. For the access credentials we recommend using a -[partial configuration](/docs/backends/config.html). +[partial configuration](/docs/language/settings/backends/configuration.html#partial-configuration). ## Data Source Configuration diff --git a/website/docs/configuration/terraform.html.md b/website/docs/language/settings/index.html.md similarity index 89% rename from website/docs/configuration/terraform.html.md rename to website/docs/language/settings/index.html.md index 4367710bd..c9a79a865 100644 --- a/website/docs/configuration/terraform.html.md +++ b/website/docs/language/settings/index.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Terraform Settings - Configuration Language" sidebar_current: "docs-config-terraform" description: |- @@ -9,10 +9,6 @@ description: |- # Terraform Settings --> **Note:** This page is about Terraform 0.12 and later. 
For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Terraform Settings](../configuration-0-11/terraform.html). - The special `terraform` configuration block type is used to configure some behaviors of Terraform itself, such as requiring a minimum Terraform version to apply your configuration. @@ -40,18 +36,18 @@ following sections. The nested `backend` block configures which backend Terraform should use. The syntax and behavior of the `backend` block is described in [Backend -Configuration](./backend.html). +Configuration](/docs/language/settings/backends/configuration.html). ## Specifying a Required Terraform Version The `required_version` setting accepts a [version constraint -string,](./version-constraints.html) which specifies which versions of Terraform +string,](/docs/language/expressions/version-constraints.html) which specifies which versions of Terraform can be used with your configuration. If the running version of Terraform doesn't match the constraints specified, Terraform will produce an error and exit without taking any further actions. -When you use [child modules](./modules.html), each module can specify its own +When you use [child modules](/docs/language/modules/index.html), each module can specify its own version requirements. The requirements of all modules in the tree must be satisfied. @@ -62,7 +58,7 @@ a minimum Terraform version that has behavior expected by the configuration. The `required_version` setting applies only to the version of Terraform CLI. Terraform's resource types are implemented by provider plugins, whose release cycles are independent of Terraform CLI and of each other. -Use [the `required_providers` block](./provider-requirements.html) to manage +Use [the `required_providers` block](/docs/language/providers/requirements.html) to manage the expected versions for each provider you use. 
## Specifying Provider Requirements @@ -84,7 +80,7 @@ terraform { } ``` -For more information, see [Provider Requirements](./provider-requirements.html). +For more information, see [Provider Requirements](/docs/language/providers/requirements.html). ## Experimental Language Features diff --git a/website/docs/backends/state.html.md b/website/docs/language/state/backends.html.md similarity index 86% rename from website/docs/backends/state.html.md rename to website/docs/language/state/backends.html.md index 936367201..da957173d 100644 --- a/website/docs/backends/state.html.md +++ b/website/docs/language/state/backends.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Backends: State Storage and Locking" sidebar_current: "docs-backends-state" description: |- @@ -9,7 +9,7 @@ description: |- # State Storage and Locking Backends are responsible for storing state and providing an API for -[state locking](/docs/state/locking.html). State locking is optional. +[state locking](/docs/language/state/locking.html). State locking is optional. Despite the state being stored remotely, all Terraform commands such as `terraform console`, the `terraform state` operations, `terraform taint`, @@ -64,10 +64,12 @@ prior to forcing the overwrite. ## State Locking -Backends are responsible for supporting [state locking](/docs/state/locking.html) -if possible. Not all backend types support state locking. In the -[list of supported backend types](/docs/backends/types) we explicitly note -whether locking is supported. +Backends are responsible for supporting [state locking](/docs/language/state/locking.html) +if possible. + +Not all backends support locking. The +[documentation for each backend](/docs/language/settings/backends/index.html) +includes details on whether it supports locking or not. For more information on state locking, view the -[page dedicated to state locking](/docs/state/locking.html). 
+[page dedicated to state locking](/docs/language/state/locking.html). diff --git a/website/docs/state/import.html.md b/website/docs/language/state/import.html.md similarity index 85% rename from website/docs/state/import.html.md rename to website/docs/language/state/import.html.md index 166614b66..1334269e9 100644 --- a/website/docs/state/import.html.md +++ b/website/docs/language/state/import.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "State: Import Existing Resources" sidebar_current: "docs-state-import" description: |- @@ -12,4 +12,4 @@ Terraform is able to import existing infrastructure. This allows you take resources you've created by some other means and bring it under Terraform management. To learn more about this, please visit the -[pages dedicated to import](/docs/import/index.html). +[pages dedicated to import](/docs/cli/import/index.html). diff --git a/website/docs/state/index.html.md b/website/docs/language/state/index.html.md similarity index 90% rename from website/docs/state/index.html.md rename to website/docs/language/state/index.html.md index cfc70c88f..071cee81e 100644 --- a/website/docs/state/index.html.md +++ b/website/docs/language/state/index.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "State" sidebar_current: "docs-state" description: |- @@ -18,7 +18,7 @@ but it can also be stored remotely, which works better in a team environment. Terraform uses this local state to create plans and make changes to your infrastructure. Prior to any operation, Terraform does a -[refresh](/docs/commands/refresh.html) to update the state with the +[refresh](/docs/cli/commands/refresh.html) to update the state with the real infrastructure. The primary purpose of Terraform state is to store bindings between objects in @@ -29,13 +29,13 @@ resource instance, and then potentially update or delete that object in response to future configuration changes. 
For more information on why Terraform requires state and why Terraform cannot -function without state, please see the page [state purpose](/docs/state/purpose.html). +function without state, please see the page [state purpose](/docs/language/state/purpose.html). ## Inspection and Modification While the format of the state files are just JSON, direct file editing of the state is discouraged. Terraform provides the -[terraform state](/docs/commands/state/index.html) command to perform +[terraform state](/docs/cli/commands/state/index.html) command to perform basic modifications of the state using the CLI. The CLI usage and output of the state commands is structured to be @@ -62,16 +62,16 @@ State snapshots are stored in JSON format and new Terraform versions are generally backward compatible with state snapshots produced by earlier versions. However, the state format is subject to change in new Terraform versions, so if you build software that parses or modifies it directly you should expect -to perform ongoing maintenence of that software as the state format evolves +to perform ongoing maintenance of that software as the state format evolves in new versions. Alternatively, there are several integration points which produce JSON output that is specifically intended for consumption by external software: -* [The `terraform output` command](/docs/commands/output.html) +* [The `terraform output` command](/docs/cli/commands/output.html) has a `-json` option, for obtaining either the full set of root module output values or a specific named output value from the latest state snapshot. -* [The `terraform show` command](/docs/commands/show.html) has a `-json` +* [The `terraform show` command](/docs/cli/commands/show.html) has a `-json` option for inspecting the latest state snapshot in full, and also for inspecting saved plan files which include a copy of the prior state at the time the plan was made. 
diff --git a/website/docs/state/locking.html.md b/website/docs/language/state/locking.html.md similarity index 78% rename from website/docs/state/locking.html.md rename to website/docs/language/state/locking.html.md index 6a4b8648d..739a4b8c4 100644 --- a/website/docs/state/locking.html.md +++ b/website/docs/language/state/locking.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "State: Locking" sidebar_current: "docs-state-locking" description: |- @@ -8,7 +8,7 @@ description: |- # State Locking -If supported by your [backend](/docs/backends), Terraform will lock your +If supported by your [backend](/docs/language/settings/backends/index.html), Terraform will lock your state for all operations that could write state. This prevents others from acquiring the lock and potentially corrupting your state. @@ -21,13 +21,13 @@ If acquiring the lock is taking longer than expected, Terraform will output a status message. If Terraform doesn't output a message, state locking is still occurring if your backend supports it. -Not all [backends](/docs/backends) support locking. Please view the list -of [backend types](/docs/backends/types) for details on whether a backend -supports locking or not. +Not all backends support locking. The +[documentation for each backend](/docs/language/settings/backends/index.html) +includes details on whether it supports locking or not. ## Force Unlock -Terraform has a [force-unlock command](/docs/commands/force-unlock.html) +Terraform has a [force-unlock command](/docs/cli/commands/force-unlock.html) to manually unlock the state if unlocking failed. 
 **Be very careful with this command.** If you unlock the state when someone
diff --git a/website/docs/state/purpose.html.md b/website/docs/language/state/purpose.html.md
similarity index 98%
rename from website/docs/state/purpose.html.md
rename to website/docs/language/state/purpose.html.md
index fd0a5c911..44bfef6d1 100644
--- a/website/docs/state/purpose.html.md
+++ b/website/docs/language/state/purpose.html.md
@@ -1,5 +1,5 @@
 ---
-layout: "docs"
+layout: "language"
 page_title: "State"
 sidebar_current: "docs-state-purpose"
 description: |-
@@ -105,7 +105,7 @@ started, but when using Terraform in a team it is important for everyone
 to be working with the same state so that operations will be applied to the
 same remote objects.
 
-[Remote state](/docs/state/remote.html) is the recommended solution
+[Remote state](/docs/language/state/remote.html) is the recommended solution
 to this problem. With a fully-featured state backend, Terraform can use
 remote locking as a measure to avoid two or more different users accidentally
 running Terraform at the same time, and thus ensure that each Terraform run
diff --git a/website/docs/language/state/remote-state-data.html.md b/website/docs/language/state/remote-state-data.html.md
new file mode 100644
index 000000000..717d8fb30
--- /dev/null
+++ b/website/docs/language/state/remote-state-data.html.md
@@ -0,0 +1,213 @@
+---
+layout: "language"
+page_title: "The terraform_remote_state Data Source"
+sidebar_current: "docs-terraform-datasource-remote-state"
+description: |-
+  Retrieves the root module output values from a Terraform state snapshot stored in a remote backend.
+---
+
+# The `terraform_remote_state` Data Source
+
+[backends]: /docs/language/settings/backends/index.html
+
+The `terraform_remote_state` data source retrieves the root module output values
+from some other Terraform configuration, using the latest state snapshot from
+the remote backend.
+
+This data source is built into Terraform, and is always available; you do not
+need to require or configure a provider in order to use it.
+
+-> **Note:** This data source is implemented by a built-in provider, whose
+[source address](/docs/language/providers/requirements.html#source-addresses)
+is `terraform.io/builtin/terraform`. That provider does not include any other
+resources or data sources.
+
+## Alternative Ways to Share Data Between Configurations
+
+Sharing data with root module outputs is convenient, but it has drawbacks.
+Although `terraform_remote_state` only exposes output values, its user must have
+access to the entire state snapshot, which often includes some sensitive
+information.
+
+When possible, we recommend explicitly publishing data for external consumption
+to a separate location instead of accessing it via remote state. This lets you
+apply different access controls for shared information and state snapshots.
+
+To share data explicitly between configurations, you can use pairs of managed
+resource types and data sources in various providers, including (but not
+limited to) the following:
+
+| System | Publish with... | Read with... |
+|--|--|--|
+| Alibaba Cloud DNS <br> (for IP addresses and hostnames) | [`alicloud_alidns_record` resource type](https://registry.terraform.io/providers/aliyun/alicloud/latest/docs/resources/alidns_record) | Normal DNS lookups, or [the `dns` provider](https://registry.terraform.io/providers/hashicorp/dns/latest/docs) |
+| Amazon Route53 <br> (for IP addresses and hostnames) | [`aws_route53_record` resource type](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_record) | Normal DNS lookups, or [the `dns` provider](https://registry.terraform.io/providers/hashicorp/dns/latest/docs) |
+| Amazon S3 | [`aws_s3_bucket_object` resource type](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_object) | [`aws_s3_bucket_object` data source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/s3_bucket_object) |
+| Amazon SSM Parameter Store | [`aws_ssm_parameter` resource type](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ssm_parameter) | [`aws_ssm_parameter` data source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) |
+| Azure Automation | [`azurerm_automation_variable_string` resource type](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/automation_variable_string) | [`azurerm_automation_variable_string` data source](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/automation_variable_string) |
+| Azure DNS <br> (for IP addresses and hostnames) | [`azurerm_dns_a_record` resource type](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/dns_a_record), etc | Normal DNS lookups, or [the `dns` provider](https://registry.terraform.io/providers/hashicorp/dns/latest/docs) |
+| Google Cloud DNS <br> (for IP addresses and hostnames) | [`google_dns_record_set` resource type](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/dns_record_set) | Normal DNS lookups, or [the `dns` provider](https://registry.terraform.io/providers/hashicorp/dns/latest/docs) |
+| Google Cloud Storage | [`google_storage_bucket_object` resource type](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/storage_bucket_object) | [`google_storage_bucket_object` data source](https://registry.terraform.io/providers/hashicorp/google/latest/docs/data-sources/storage_bucket_object) and [`http` data source](https://registry.terraform.io/providers/hashicorp/http/latest/docs/data-sources/http) |
+| HashiCorp Consul | [`consul_key_prefix` resource type](https://registry.terraform.io/providers/hashicorp/consul/latest/docs/resources/key_prefix) | [`consul_key_prefix` data source](https://registry.terraform.io/providers/hashicorp/consul/latest/docs/data-sources/key_prefix) |
+| Kubernetes | [`kubernetes_config_map` resource type](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | [`kubernetes_config_map` data source](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/data-sources/config_map) |
+| OCI Object Storage | [`oci_objectstorage_bucket` resource type](https://registry.terraform.io/providers/hashicorp/oci/latest/docs/resources/objectstorage_object) | [`oci_objectstorage_bucket` data source](https://registry.terraform.io/providers/hashicorp/oci/latest/docs/data-sources/objectstorage_object) |
+
+-> These are some common options from the Official Terraform providers, but
+there are too many configuration storage options for us to list them all
+here, including some in partner and community providers.
+Any pair of managed resource type and corresponding data source can potentially
+be used to share data between Terraform configurations.
See individual provider +documentation to find other possibilities. + +A key advantage of using a separate explicit configuration store instead of +`terraform_remote_state` is that the data can potentially also be read by +systems other than Terraform, such as configuration management or scheduler +systems within your compute instances. For that reason, we recommend selecting +a configuration store that your other infrastructure could potentially make +use of. For example: + +* If you wish to share IP addresses and hostnames, you could publish them as +normal DNS `A`, `AAAA`, `CNAME`, and `SRV` records in a private DNS zone and +then configure your other infrastructure to refer to that zone so you can +find infrastructure objects via your system's built-in DNS resolver. +* If you use HashiCorp Consul then publishing data to the Consul key/value +store or Consul service catalog can make that data also accessible via +[Consul Template](https://github.com/hashicorp/consul-template) +or the +[HashiCorp Nomad](https://www.nomadproject.io/docs/job-specification/template) +`template` stanza. +* If you use Kubernetes then you can +[make Config Maps available to your Pods](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/). + +Some of the data stores listed above are specifically designed for storing +small configuration values, while others are generic blob storage systems. For +those generic systems, you can use +[the `jsonencode` function](https://www.terraform.io/docs/language/functions/jsonencode.html) +and +[the `jsondecode` function](https://www.terraform.io/docs/language/functions/jsondecode.html) respectively +to store and retrieve structured data. 
+
+You can encapsulate the implementation details of retrieving your published
+configuration data by writing a
+[data-only module](/docs/language/modules/develop/composition.html#data-only-modules)
+containing the necessary data source configuration and any necessary
+post-processing such as JSON decoding. You can then change that module later
+if you switch to a different strategy for sharing data between multiple
+Terraform configurations.
+
+## Example Usage (`remote` Backend)
+
+```hcl
+data "terraform_remote_state" "vpc" {
+  backend = "remote"
+
+  config = {
+    organization = "hashicorp"
+    workspaces = {
+      name = "vpc-prod"
+    }
+  }
+}
+
+# Terraform >= 0.12
+resource "aws_instance" "foo" {
+  # ...
+  subnet_id = data.terraform_remote_state.vpc.outputs.subnet_id
+}
+
+# Terraform <= 0.11
+resource "aws_instance" "foo" {
+  # ...
+  subnet_id = "${data.terraform_remote_state.vpc.subnet_id}"
+}
+```
+
+## Example Usage (`local` Backend)
+
+```hcl
+data "terraform_remote_state" "vpc" {
+  backend = "local"
+
+  config = {
+    path = "..."
+  }
+}
+
+# Terraform >= 0.12
+resource "aws_instance" "foo" {
+  # ...
+  subnet_id = data.terraform_remote_state.vpc.outputs.subnet_id
+}
+
+# Terraform <= 0.11
+resource "aws_instance" "foo" {
+  # ...
+  subnet_id = "${data.terraform_remote_state.vpc.subnet_id}"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `backend` - (Required) The remote backend to use.
+* `workspace` - (Optional) The Terraform workspace to use, if the backend
+  supports workspaces.
+* `config` - (Optional; object) The configuration of the remote backend.
+  Although this argument is listed as optional, most backends require
+  some configuration.
+
+  The `config` object can use any arguments that would be valid in the
+  equivalent `terraform { backend "<TYPE>" { ... } }` block. See
+  [the documentation of your chosen backend](/docs/language/settings/backends/index.html)
+  for details.
+
+  -> **Note:** If the backend configuration requires a nested block, specify
+  it here as a normal attribute with an object value. (For example,
+  `workspaces = { ... }` instead of `workspaces { ... }`.)
+* `defaults` - (Optional; object) Default values for outputs, in case the state
+  file is empty or lacks a required output.
+
+## Attributes Reference
+
+In addition to the above, the following attributes are exported:
+
+* (v0.12+) `outputs` - An object containing every root-level
+  [output](/docs/language/values/outputs.html) in the remote state.
+* (<= v0.11) `<OUTPUT NAME>` - Each root-level [output](/docs/language/values/outputs.html)
+  in the remote state appears as a top level attribute on the data source.
+
+## Root Outputs Only
+
+Only the root-level output values from the remote state snapshot are exposed
+for use elsewhere in your module. Resource data and output values from nested
+modules are not accessible.
+
+If you wish to make a nested module output value accessible as a root module
+output value, you must explicitly configure a passthrough in the root module.
+For example:
+
+
+
+```hcl
+module "app" {
+  source = "..."
+}
+
+output "app_value" {
+  # This syntax is for Terraform 0.12 or later.
+  value = module.app.example
+}
+```
+
+In this example, the output value named `example` from the "app" module is
+available as the `app_value` root module output value. If this configuration
+didn't include the `output "app_value"` block then the data would not be
+accessible via `terraform_remote_state`.
+
+~> **Warning:** Although `terraform_remote_state` doesn't expose any other
+state snapshot information for use in configuration, the state snapshot data
+is a single object and so any user or server which has enough access to read
+the root module output values will also always have access to the full state
+snapshot data by direct network requests.
Don't use `terraform_remote_state` +if any of the resources in your configuration work with data that you consider +sensitive. diff --git a/website/docs/state/remote.html.md b/website/docs/language/state/remote.html.md similarity index 79% rename from website/docs/state/remote.html.md rename to website/docs/language/state/remote.html.md index a2328d994..998b47945 100644 --- a/website/docs/state/remote.html.md +++ b/website/docs/language/state/remote.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "State: Remote Storage" sidebar_current: "docs-state-remote" description: |- @@ -19,13 +19,13 @@ which can then be shared between all members of a team. Terraform supports storing state in [Terraform Cloud](https://www.hashicorp.com/products/terraform/), [HashiCorp Consul](https://www.consul.io/), Amazon S3, Azure Blob Storage, Google Cloud Storage, Alibaba Cloud OSS, and more. -Remote state is a feature of [backends](/docs/backends), which you can activate -in your configuration's root module. +Remote state is implemented by a [backend](/docs/language/settings/backends/index.html), +which you can configure in your configuration's root module. ## Delegation and Teamwork Remote state allows you to share -[output values](/docs/configuration/outputs.html) with other configurations. +[output values](/docs/language/values/outputs.html) with other configurations. This allows your infrastructure to be decomposed into smaller components. Put another way, remote state also allows teams to share infrastructure @@ -39,21 +39,21 @@ you can expose things such as VPC IDs, subnets, NAT instance IDs, etc. through remote state and have other Terraform states consume that. For example usage, see -[the `terraform_remote_state` data source](/docs/providers/terraform/d/remote_state.html). +[the `terraform_remote_state` data source](/docs/language/state/remote-state-data.html). 
While remote state can be a convenient, built-in mechanism for sharing data between configurations, you may prefer to use more general stores to pass settings both to other configurations and to other consumers. For example, if your environment has [HashiCorp Consul](https://www.consul.io/) then you can have one Terraform configuration that writes to Consul using -[`consul_key_prefix`](/docs/providers/consul/r/key_prefix.html) and then +[`consul_key_prefix`](https://registry.terraform.io/providers/hashicorp/consul/latest/docs/resources/key_prefix) and then another that consumes those values using -[the `consul_keys` data source](/docs/providers/consul/d/keys.html). +[the `consul_keys` data source](https://registry.terraform.io/providers/hashicorp/consul/latest/docs/data-sources/keys). ## Locking and Teamwork For fully-featured remote backends, Terraform can also use -[state locking](/docs/state/locking.html) to prevent concurrent runs of +[state locking](/docs/language/state/locking.html) to prevent concurrent runs of Terraform against the same state. [Terraform Cloud by HashiCorp](https://www.hashicorp.com/products/terraform/) diff --git a/website/docs/state/sensitive-data.html.md b/website/docs/language/state/sensitive-data.html.md similarity index 93% rename from website/docs/state/sensitive-data.html.md rename to website/docs/language/state/sensitive-data.html.md index f5ccaa116..49f5c1270 100644 --- a/website/docs/state/sensitive-data.html.md +++ b/website/docs/language/state/sensitive-data.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "State: Sensitive Data" sidebar_current: "docs-state-sensitive-data" description: |- @@ -15,7 +15,7 @@ passwords. When using local state, state is stored in plain-text JSON files. -When using [remote state](/docs/state/remote.html), state is only ever held in +When using [remote state](/docs/language/state/remote.html), state is only ever held in memory when used by Terraform. 
It may be encrypted at rest, but this depends on the specific remote state backend. diff --git a/website/docs/state/workspaces.html.md b/website/docs/language/state/workspaces.html.md similarity index 81% rename from website/docs/state/workspaces.html.md rename to website/docs/language/state/workspaces.html.md index e2a51c4b8..07db50a40 100644 --- a/website/docs/state/workspaces.html.md +++ b/website/docs/language/state/workspaces.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "State: Workspaces" sidebar_current: "docs-state-workspaces" description: |- @@ -8,9 +8,9 @@ description: |- # Workspaces -Each Terraform configuration has an associated [backend](/docs/backends/index.html) +Each Terraform configuration has an associated [backend](/docs/language/settings/backends/index.html) that defines how operations are executed and where persistent data such as -[the Terraform state](https://www.terraform.io/docs/state/purpose.html) are +[the Terraform state](https://www.terraform.io/docs/language/state/purpose.html) are stored. The persistent data stored in the backend belongs to a _workspace_. Initially @@ -25,16 +25,16 @@ credentials. 
Multiple workspaces are currently supported by the following backends: - * [AzureRM](/docs/backends/types/azurerm.html) - * [Consul](/docs/backends/types/consul.html) - * [COS](/docs/backends/types/cos.html) - * [GCS](/docs/backends/types/gcs.html) - * [Kubernetes](/docs/backends/types/kubernetes.html) - * [Local](/docs/backends/types/local.html) - * [Manta](/docs/backends/types/manta.html) - * [Postgres](/docs/backends/types/pg.html) - * [Remote](/docs/backends/types/remote.html) - * [S3](/docs/backends/types/s3.html) + * [AzureRM](/docs/language/settings/backends/azurerm.html) + * [Consul](/docs/language/settings/backends/consul.html) + * [COS](/docs/language/settings/backends/cos.html) + * [GCS](/docs/language/settings/backends/gcs.html) + * [Kubernetes](/docs/language/settings/backends/kubernetes.html) + * [Local](/docs/language/settings/backends/local.html) + * [Manta](/docs/language/settings/backends/manta.html) + * [Postgres](/docs/language/settings/backends/pg.html) + * [Remote](/docs/language/settings/backends/remote.html) + * [S3](/docs/language/settings/backends/s3.html) In the 0.9 line of Terraform releases, this concept was known as "environment". It was renamed in 0.10 based on feedback about confusion caused by the @@ -81,7 +81,7 @@ Within your Terraform configuration, you may include the name of the current workspace using the `${terraform.workspace}` interpolation sequence. This can be used anywhere interpolations are allowed. However, it should **not** be used in remote operations against Terraform Cloud workspaces. For an -explanation, see the [remote backend](../backends/types/remote.html#workspaces) +explanation, see the [remote backend](/docs/language/settings/backends/remote.html#workspaces) document. Referencing the current workspace is useful for changing behavior based @@ -146,7 +146,7 @@ In this case, the backend used for each deployment often belongs to that deployment, with different credentials and access controls. 
Named workspaces are _not_ a suitable isolation mechanism for this scenario. -Instead, use one or more [re-usable modules](/docs/modules/index.html) to +Instead, use one or more [re-usable modules](/docs/language/modules/develop/index.html) to represent the common elements, and then represent each instance as a separate configuration that instantiates those common elements in the context of a different backend. In that case, the root module of each configuration will @@ -158,25 +158,25 @@ rather than multiple deployments, data can be passed from one component to another using paired resources types and data sources. For example: * Where a shared [Consul](https://consul.io/) cluster is available, use - [`consul_key_prefix`](/docs/providers/consul/r/key_prefix.html) to - publish to the key/value store and [`consul_keys`](/docs/providers/consul/d/keys.html) + [`consul_key_prefix`](https://registry.terraform.io/providers/hashicorp/consul/latest/docs/resources/key_prefix) to + publish to the key/value store and [`consul_keys`](https://registry.terraform.io/providers/hashicorp/consul/latest/docs/data-sources/keys) to retrieve those values in other configurations. * In systems that support user-defined labels or tags, use a tagging convention to make resources automatically discoverable. For example, use - [the `aws_vpc` resource type](/docs/providers/aws/r/vpc.html) + [the `aws_vpc` resource type](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc) to assign suitable tags and then - [the `aws_vpc` data source](/docs/providers/aws/d/vpc.html) + [the `aws_vpc` data source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc) to query by those tags in other configurations. 
* For server addresses, use a provider-specific resource to create a DNS record with a predictable name and then either use that name directly or - use [the `dns` provider](/docs/providers/dns/index.html) to retrieve + use [the `dns` provider](https://registry.terraform.io/providers/hashicorp/dns/latest/docs) to retrieve the published addresses in other configurations. * If a Terraform state for one configuration is stored in a remote backend that is accessible to other configurations then - [`terraform_remote_state`](/docs/providers/terraform/d/remote_state.html) + [`terraform_remote_state`](/docs/language/state/remote-state-data.html) can be used to directly consume its root module outputs from those other configurations. This creates a tighter coupling between configurations, but avoids the need for the "producer" configuration to explicitly @@ -194,9 +194,9 @@ local-only `terraform.tfstate`; some teams commit these files to version control, although using a remote backend instead is recommended when there are multiple collaborators. -For [remote state](/docs/state/remote.html), the workspaces are stored -directly in the configured [backend](/docs/backends). For example, if you -use [Consul](/docs/backends/types/consul.html), the workspaces are stored +For [remote state](/docs/language/state/remote.html), the workspaces are stored +directly in the configured [backend](/docs/language/settings/backends/index.html). For example, if you +use [Consul](/docs/language/settings/backends/consul.html), the workspaces are stored by appending the workspace name to the state path. To ensure that workspace names are stored correctly and safely in all backends, the name must be valid to use in a URL path segment without escaping. 
diff --git a/website/docs/configuration/syntax.html.md b/website/docs/language/syntax/configuration.html.md similarity index 94% rename from website/docs/configuration/syntax.html.md rename to website/docs/language/syntax/configuration.html.md index d0c7e9587..e8c38d51c 100644 --- a/website/docs/configuration/syntax.html.md +++ b/website/docs/language/syntax/configuration.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Syntax - Configuration Language" sidebar_current: "docs-config-syntax" description: |- @@ -10,10 +10,6 @@ description: |- # Configuration Syntax --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Syntax](../configuration-0-11/syntax.html). - Other pages in this section have described various configuration constructs that can appear in the Terraform language. This page describes the lower-level syntax of the language in more detail, revealing the building blocks that @@ -22,7 +18,7 @@ those constructs are built from. This page describes the _native syntax_ of the Terraform language, which is a rich language designed to be relatively easy for humans to read and write. The constructs in the Terraform language can also be expressed in -[JSON syntax](./syntax-json.html), which is harder for humans +[JSON syntax](/docs/language/syntax/json.html), which is harder for humans to read and edit but easier to generate and parse programmatically. This low-level syntax of the Terraform language is defined in terms of a @@ -53,7 +49,7 @@ after the equals sign is the argument's value. 
The context where the argument appears determines what value types are valid (for example, each resource type has a schema that defines the types of its arguments), but many arguments accept arbitrary -[expressions](./expressions.html), which allow the value to +[expressions](/docs/language/expressions/index.html), which allow the value to either be specified literally or generated from other values programmatically. -> **Note:** Terraform's configuration language is based on a more general diff --git a/website/docs/language/syntax/index.html.md b/website/docs/language/syntax/index.html.md new file mode 100644 index 000000000..83de5faaa --- /dev/null +++ b/website/docs/language/syntax/index.html.md @@ -0,0 +1,21 @@ +--- +layout: "language" +page_title: "Syntax Overview - Configuration Language" +--- + +# Syntax + +The majority of the Terraform language documentation focuses on the practical +uses of the language and the specific constructs it uses. The pages in this +section offer a more abstract view of the Terraform language. + +- [Configuration Syntax](/docs/language/syntax/configuration.html) describes the native + grammar of the Terraform language. +- [JSON Configuration Syntax](/docs/language/syntax/json.html) documents + how to represent Terraform language constructs in the pure JSON variant of the + Terraform language. Terraform's JSON syntax is unfriendly to humans, but can + be very useful when generating infrastructure as code with other systems that + don't have a readily available HCL library. +- [Style Conventions](/docs/language/syntax/style.html) documents some commonly + accepted formatting guidelines for Terraform code. These conventions can be + enforced automatically with [`terraform fmt`](/docs/cli/commands/fmt.html). 
diff --git a/website/docs/configuration/syntax-json.html.md b/website/docs/language/syntax/json.html.md similarity index 95% rename from website/docs/configuration/syntax-json.html.md rename to website/docs/language/syntax/json.html.md index d81911f22..393e2840b 100644 --- a/website/docs/configuration/syntax-json.html.md +++ b/website/docs/language/syntax/json.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "JSON Configuration Syntax - Configuration Language" sidebar_current: "docs-config-syntax-json" description: |- @@ -9,13 +9,8 @@ description: |- # JSON Configuration Syntax --> **Note:** This page is about Terraform 0.12 and later. The JSON configuration -syntax in 0.11 and earlier was never formally documented. For other information -about Terraform 0.11 and earlier, see -[0.11 Configuration Language](../configuration-0-11/index.html). - Most Terraform configurations are written in -[the native Terraform language syntax](./syntax.html), which is designed to be +[the native Terraform language syntax](/docs/language/syntax/configuration.html), which is designed to be relatively easy for humans to read and update. Terraform also supports an alternative syntax that is JSON-compatible. This @@ -99,7 +94,7 @@ different (see the [block-type-specific exceptions](#block-type-specific-excepti correspond either to argument names or to nested block type names. * Where a property corresponds to an argument that accepts - [arbitrary expressions](./expressions.html) in the native syntax, the + [arbitrary expressions](/docs/language/expressions/index.html) in the native syntax, the property value is mapped to an expression as described under [_Expression Mapping_](#expression-mapping) below. 
For arguments that do _not_ accept arbitrary expressions, the interpretation of the property @@ -116,20 +111,22 @@ different (see the [block-type-specific exceptions](#block-type-specific-excepti ## Expression Mapping Since JSON grammar is not able to represent all of the Terraform language -[expression syntax](./expressions.html), JSON values interpreted as expressions +[expression syntax](/docs/language/expressions/index.html), JSON values interpreted as expressions are mapped as follows: | JSON | Terraform Language Interpretation | | ------- | ------------------------------------------------------------------------------------------------------------- | | Boolean | A literal `bool` value. | | Number | A literal `number` value. | -| String | Parsed as a [string template](./expressions.html#string-templates) and then evaluated as described below. | +| String | Parsed as a [string template][] and then evaluated as described below. | | Object | Each property value is mapped per this table, producing an `object(...)` value with suitable attribute types. | | Array | Each element is mapped per this table, producing a `tuple(...)` value with suitable element types. | | Null | A literal `null`. | +[string template]: /docs/language/expressions/strings.html#string-templates + When a JSON string is encountered in a location where arbitrary expressions are -expected, its value is first parsed as a [string template](./expressions.html#string-templates) +expected, its value is first parsed as a [string template][] and then it is evaluated to produce the final result. 
If the given template consists _only_ of a single interpolation sequence, diff --git a/website/docs/configuration/style.html.md b/website/docs/language/syntax/style.html.md similarity index 93% rename from website/docs/configuration/style.html.md rename to website/docs/language/syntax/style.html.md index 2ca6e7326..c7599bd07 100644 --- a/website/docs/configuration/style.html.md +++ b/website/docs/language/syntax/style.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Style Conventions - Configuration Language" sidebar_current: "docs-config-style" description: |- @@ -10,10 +10,6 @@ description: |- # Style Conventions --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language](../configuration-0-11/index.html). - The Terraform parser allows you some flexibility in how you lay out the elements in your configuration files, but the Terraform language also has some idiomatic style conventions which we recommend users always follow diff --git a/website/docs/language/values/index.html.md b/website/docs/language/values/index.html.md new file mode 100644 index 000000000..e73972c41 --- /dev/null +++ b/website/docs/language/values/index.html.md @@ -0,0 +1,18 @@ +--- +layout: "language" +page_title: "Variables and Outputs" +--- + +# Variables and Outputs + +The Terraform language includes a few kinds of blocks for requesting or +publishing named values. + +- [Input Variables](/docs/language/values/variables.html) serve as parameters for + a Terraform module, so users can customize behavior without editing the source. + +- [Output Values](/docs/language/values/outputs.html) are like return values for a + Terraform module. + +- [Local Values](/docs/language/values/locals.html) are a convenience feature for + assigning a short name to an expression. 
diff --git a/website/docs/configuration/locals.html.md b/website/docs/language/values/locals.html.md similarity index 84% rename from website/docs/configuration/locals.html.md rename to website/docs/language/values/locals.html.md index 54502a288..ad755b264 100644 --- a/website/docs/configuration/locals.html.md +++ b/website/docs/language/values/locals.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Local Values - Configuration Language" sidebar_current: "docs-config-locals" description: |- @@ -9,11 +9,11 @@ description: |- # Local Values --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Local Values](../configuration-0-11/locals.html). +> **Hands-on:** Try the [Simplify Terraform Configuration with +Locals](https://learn.hashicorp.com/tutorials/terraform/locals?in=terraform/configuration-language&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) +tutorial on HashiCorp Learn. -A local value assigns a name to an [expression](./expressions.html), +A local value assigns a name to an [expression](/docs/language/expressions/index.html), so you can use it multiple times within a module without repeating it. @@ -61,7 +61,7 @@ locals { ## Using Local Values Once a local value is declared, you can reference it in -[expressions](./expressions.html) as `local.<NAME>`. +[expressions](/docs/language/expressions/index.html) as `local.<NAME>`. -> **Note:** Local values are _created_ by a `locals` block (plural), but you _reference_ them as attributes on an object named `local` (singular). 
Make sure diff --git a/website/docs/configuration/outputs.html.md b/website/docs/language/values/outputs.html.md similarity index 85% rename from website/docs/configuration/outputs.html.md rename to website/docs/language/values/outputs.html.md index 01c89acfd..60450f83d 100644 --- a/website/docs/configuration/outputs.html.md +++ b/website/docs/language/values/outputs.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Output Values - Configuration Language" sidebar_current: "docs-config-outputs" description: |- @@ -8,9 +8,9 @@ description: |- # Output Values --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Output Values](../configuration-0-11/outputs.html). +> **Hands-on:** Try the [Output Data From +Terraform](https://learn.hashicorp.com/tutorials/terraform/outputs?in=terraform/configuration-language&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) +tutorial on HashiCorp Learn. Output values are like the return values of a Terraform module, and have several uses: @@ -19,9 +19,9 @@ uses: to a parent module. - A root module can use outputs to print certain values in the CLI output after running `terraform apply`. -- When using [remote state](/docs/state/remote.html), root module outputs can be +- When using [remote state](/docs/language/state/remote.html), root module outputs can be accessed by other configurations via a - [`terraform_remote_state` data source](/docs/providers/terraform/d/remote_state.html). + [`terraform_remote_state` data source](/docs/language/state/remote-state-data.html). Resource instances managed by Terraform each export attributes whose values can be used elsewhere in configuration. Output values are a way to expose some @@ -42,11 +42,11 @@ output "instance_ip_addr" { ``` The label immediately after the `output` keyword is the name, which must be a -valid [identifier](./syntax.html#identifiers). 
In a root module, this name is +valid [identifier](/docs/language/syntax/configuration.html#identifiers). In a root module, this name is displayed to the user; in a child module, it can be used to access the output's value. -The `value` argument takes an [expression](./expressions.html) +The `value` argument takes an [expression](/docs/language/expressions/index.html) whose result is to be returned to the user. In this example, the expression refers to the `private_ip` attribute exposed by an `aws_instance` resource defined elsewhere in this module (not shown). Any valid expression is allowed @@ -66,6 +66,8 @@ value as `module.web_server.instance_ip_addr`. `output` blocks can optionally include `description`, `sensitive`, and `depends_on` arguments, which are described in the following sections. + + ### `description` — Output Value Documentation Because the output values of a module are part of its user interface, you can @@ -85,6 +87,8 @@ string might be included in documentation about the module, and so it should be written from the perspective of the user of the module rather than its maintainer. For commentary for module maintainers, use comments. + + ### `sensitive` — Suppressing Values in CLI Output An output can be marked as containing sensitive material using the optional @@ -98,7 +102,7 @@ output "db_password" { } ``` -Setting an output value as sensitive prevents Terraform from showing its value +Setting an output value as sensitive prevents Terraform from showing its value in `plan` and `apply`. In the following scenario, our root module has an output declared as sensitive and a module call with a sensitive output, which we then use in a resource attribute. 
@@ -122,7 +126,7 @@ output "out" { output "a" { value = "secret" - sensitive = true" + sensitive = true } ``` @@ -147,9 +151,11 @@ Changes to Outputs: -> **Note:** In Terraform versions prior to Terraform 0.14, setting an output value in the root module as sensitive would prevent Terraform from showing its value in the list of outputs at the end of `terraform apply`. However, the value could still display in the CLI output for other reasons, like if the value is referenced in an expression for a resource argument. Sensitive output values are still recorded in the -[state](/docs/state/index.html), and so will be visible to anyone who is able +[state](/docs/language/state/index.html), and so will be visible to anyone who is able to access the state data. For more information, see -[_Sensitive Data in State_](/docs/state/sensitive-data.html). +[_Sensitive Data in State_](/docs/language/state/sensitive-data.html). + + ### `depends_on` — Explicit Output Dependencies @@ -163,7 +169,7 @@ correctly determine the dependencies between resources defined in different modules. Just as with -[resource dependencies](./resources.html#resource-dependencies), +[resource dependencies](/docs/language/resources/behavior.html#resource-dependencies), Terraform analyzes the `value` expression for an output value and automatically determines a set of dependencies, but in less-common cases there are dependencies that cannot be recognized implicitly. 
In these rare cases, the diff --git a/website/docs/configuration/variables.html.md b/website/docs/language/values/variables.html.md similarity index 89% rename from website/docs/configuration/variables.html.md rename to website/docs/language/values/variables.html.md index 9525cc2de..75f6b4780 100644 --- a/website/docs/configuration/variables.html.md +++ b/website/docs/language/values/variables.html.md @@ -1,5 +1,5 @@ --- -layout: "docs" +layout: "language" page_title: "Input Variables - Configuration Language" sidebar_current: "docs-config-variables" description: |- @@ -9,11 +9,7 @@ description: |- # Input Variables --> **Note:** This page is about Terraform 0.12 and later. For Terraform 0.11 and -earlier, see -[0.11 Configuration Language: Input Variables](../configuration-0-11/variables.html). - -> **Hands-on:** Try the [Define Input Variables](https://learn.hashicorp.com/tutorials/terraform/aws-variables?in=terraform/aws-get-started&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. +> **Hands-on:** Try the [Customize Terraform Configuration with Variables](https://learn.hashicorp.com/tutorials/terraform/variables?in=terraform/configuration-language&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. Input variables serve as parameters for a Terraform module, allowing aspects of the module to be customized without altering the module's own source code, @@ -21,7 +17,7 @@ and allowing modules to be shared between different configurations. When you declare variables in the root module of your configuration, you can set their values using CLI options and environment variables. -When you declare them in [child modules](./modules.html), +When you declare them in [child modules](/docs/language/modules/index.html), the calling module should pass values in the `module` block. 
If you're familiar with traditional programming languages, it can be useful to @@ -36,7 +32,7 @@ compare Terraform modules to function definitions: variable is being discussed. Other kinds of variables in Terraform include _environment variables_ (set by the shell where Terraform runs) and _expression variables_ (used to indirectly represent a value in an -[expression](./expressions.html)). +[expression](/docs/language/expressions/index.html)). ## Declaring an Input Variable @@ -74,11 +70,11 @@ be unique among all variables in the same module. This name is used to assign a value to the variable from outside and to reference the variable's value from within the module. -The name of a variable can be any valid [identifier](./syntax.html#identifiers) +The name of a variable can be any valid [identifier](/docs/language/syntax/configuration.html#identifiers) _except_ the following: `source`, `version`, `providers`, `count`, `for_each`, `lifecycle`, `depends_on`, `locals`. These names are reserved for meta-arguments in -[module configuration blocks](./modules.html), and cannot be +[module configuration blocks](/docs/language/modules/syntax.html), and cannot be declared as variable names. ## Arguments @@ -106,7 +102,7 @@ configuration. [inpage-type]: #type-constraints The `type` argument in a `variable` block allows you to restrict the -[type of value](./expressions.html#types-and-values) that will be accepted as +[type of value](/docs/language/expressions/types.html) that will be accepted as the value for a variable. If no type constraint is set then a value of any type is accepted. @@ -133,7 +129,7 @@ collections: The keyword `any` may be used to indicate that any type is acceptable. For more information on the meaning and behavior of these different types, as well as detailed information about automatic conversion of complex types, see -[Type Constraints](./types.html). +[Type Constraints](/docs/language/expressions/types.html). 
If both the `type` and `default` arguments are specified, the given default value must be convertible to the specified type. @@ -187,7 +183,7 @@ The expression can refer only to the variable that the condition applies to, and _must not_ produce errors. If the failure of an expression is the basis of the validation decision, use -[the `can` function](./functions/can.html) to detect such errors. For example: +[the `can` function](/docs/language/functions/can.html) to detect such errors. For example: ```hcl variable "image_id" { @@ -211,11 +207,13 @@ using a sentence structure similar to the above examples. [inpage-sensitive]: #suppressing-values-in-cli-output --> This feature was introduced in Terraform CLI v0.14.0. +-> This feature was introduced in Terraform v0.14.0. + +> **Hands-on:** Try the [Protect Sensitive Input Variables](https://learn.hashicorp.com/tutorials/terraform/sensitive-variables?in=terraform/configuration-language&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial on HashiCorp Learn. Setting a variable as `sensitive` prevents Terraform from showing its value in the `plan` or `apply` output, when that variable is used within a configuration. -Sensitive values are still recorded in the [state](/docs/state/index.html), and so will be visible to anyone who is able to access the state data. For more information, see [_Sensitive Data in State_](/docs/state/sensitive-data.html). +Sensitive values are still recorded in the [state](/docs/language/state/index.html), and so will be visible to anyone who is able to access the state data. For more information, see [_Sensitive Data in State_](/docs/language/state/sensitive-data.html). A provider can define [an attribute as sensitive](/docs/extend/best-practices/sensitive-state.html#using-the-sensitive-flag), which prevents the value of that attribute from being displayed in logs or regular output. 
The `sensitive` argument on variables allows users to replicate this behavior for values in their configuration, by defining a variable as `sensitive`. @@ -302,7 +300,7 @@ random_pet.animal: Creation complete after 0s [id=jae-known-mongoose] ## Using Input Variable Values Within the module that declared a variable, its value can be accessed from -within [expressions](./expressions.html) as `var.<NAME>`, +within [expressions](/docs/language/expressions/index.html) as `var.<NAME>`, where `<NAME>` matches the label given in the declaration block: -> **Note:** Input variables are _created_ by a `variable` block, but you @@ -332,7 +330,7 @@ can be set in a number of ways: The following sections describe these options in more detail. This section does not apply to _child_ modules, where values for input variables are instead assigned in the configuration of their parent module, as described in -[_Modules_](./modules.html). +[_Modules_](/docs/language/modules/index.html). ### Variables on the Command Line @@ -341,12 +339,14 @@ when running the `terraform plan` and `terraform apply` commands: ``` terraform apply -var="image_id=ami-abc123" -terraform apply -var='image_id_list=["ami-abc123","ami-def456"]' +terraform apply -var='image_id_list=["ami-abc123","ami-def456"]' -var="instance_type=t2.micro" terraform apply -var='image_id_map={"us-east-1":"ami-abc123","us-east-2":"ami-def456"}' ``` The `-var` option can be used any number of times in a single command. + + ### Variable Definitions (`.tfvars`) Files To set lots of variables, it is more convenient to specify their values in @@ -411,9 +411,10 @@ and lower case letters as in the above example. ### Complex-typed Values -When variable values are provided in a variable definitions file, Terraform's -[usual syntax](./expressions.html#structural-types) can be used to assign -complex-typed values, like lists and maps. 
+When variable values are provided in a variable definitions file, you can use +Terraform's usual syntax for +[literal expressions](/docs/language/expressions/types.html#literal-expressions) +to assign complex-typed values, like lists and maps. Some special rules apply to the `-var` command line option and to environment variables. For convenience, Terraform defaults to interpreting `-var` and diff --git a/website/docs/plugins/basics.html.md b/website/docs/plugins/basics.html.md deleted file mode 100644 index 55103f201..000000000 --- a/website/docs/plugins/basics.html.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -layout: "docs" -page_title: "Plugin Basics" -sidebar_current: "docs-plugins-basics" -description: |- - This page documents the basics of how the plugin system in Terraform works, and how to setup a basic development environment for plugin development if you're writing a Terraform plugin. ---- - -# Plugin Basics - -~> **Advanced topic!** Plugin development is a highly advanced -topic in Terraform, and is not required knowledge for day-to-day usage. -If you don't plan on writing any plugins, this section of the documentation is -not necessary to read. For general use of Terraform, please see -[Intro to Terraform](/intro/index.html) or the -[Terraform: Get Started](https://learn.hashicorp.com/collections/terraform/aws-get-started?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) -collection on HashiCorp Learn. - -This page documents the basics of how the plugin system in Terraform -works, and how to setup a basic development environment for plugin development -if you're writing a Terraform plugin. - -## How it Works - -Terraform providers and provisioners are provided via plugins. Each plugin -exposes an implementation for a specific service, such as AWS, or provisioner, -such as bash. Plugins are executed as a separate process and communicate with -the main Terraform binary over an RPC interface. 
- -More details are available in -_[Plugin Internals](/docs/internals/internal-plugins.html)_. - -The code within the binaries must adhere to certain interfaces. -The network communication and RPC is handled automatically by higher-level -Terraform libraries. The exact interface to implement is documented -in its respective documentation section. - -## Installing Plugins - -The [provider plugins distributed by HashiCorp](/docs/providers/index.html) are -automatically installed by `terraform init`. Third-party plugins (both -providers and provisioners) can be manually installed into the user plugins -directory, located at `%APPDATA%\terraform.d\plugins` on Windows and -`~/.terraform.d/plugins` on other systems. - -For more information, see: - -- [Configuring Providers](/docs/configuration/providers.html) -- [Configuring Providers: Third-party Plugins](/docs/configuration/providers.html#third-party-plugins) - -For developer-centric documentation, see: - -- [How Terraform Works: Plugin Discovery](/docs/extend/how-terraform-works.html#discovery) - -## Developing a Plugin - -Developing a plugin is simple. The only knowledge necessary to write -a plugin is basic command-line skills and basic knowledge of the -[Go programming language](http://golang.org). - --> **Note:** A common pitfall is not properly setting up a -$GOPATH. This can lead to strange errors. You can read more about -this [here](https://golang.org/doc/code.html) to familiarize -yourself. - -Create a new Go project somewhere in your `$GOPATH`. If you're a -GitHub user, we recommend creating the project in the directory -`$GOPATH/src/github.com/USERNAME/terraform-NAME`, where `USERNAME` -is your GitHub username and `NAME` is the name of the plugin you're -developing. This structure is what Go expects and simplifies things down -the road. - -The `NAME` should either begin with `provider-` or `provisioner-`, -depending on what kind of plugin it will be. 
The repository name will, -by default, be the name of the binary produced by `go install` for -your plugin package. - -With the package directory made, create a `main.go` file. This project will -be a binary so the package is "main": - -```golang -package main - -import ( - "github.com/hashicorp/terraform/plugin" -) - -func main() { - plugin.Serve(new(MyPlugin)) -} -``` - -The name `MyPlugin` is a placeholder for the struct type that represents -your plugin's implementation. This must implement either -`terraform.ResourceProvider` or `terraform.ResourceProvisioner`, depending -on the plugin type. - -To test your plugin, the easiest method is to copy your `terraform` binary -to `$GOPATH/bin` and ensure that this copy is the one being used for testing. -`terraform init` will search for plugins within the same directory as the -`terraform` binary, and `$GOPATH/bin` is the directory into which `go install` -will place the plugin executable. diff --git a/website/docs/plugins/index.html.md b/website/docs/plugins/index.html.md deleted file mode 100644 index 7970e704c..000000000 --- a/website/docs/plugins/index.html.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -layout: "docs" -page_title: "Plugins" -sidebar_current: "docs-plugins" -description: |- - Terraform is built on a plugin-based architecture. All providers and provisioners that are used in Terraform configurations are plugins, even the core types such as AWS and Heroku. Users of Terraform are able to write new plugins in order to support new functionality in Terraform. ---- - -# Plugins - -Terraform is built on a plugin-based architecture. All providers and -provisioners that are used in Terraform configurations are plugins, even -the core types such as AWS and Heroku. Users of Terraform are able to -write new plugins in order to support new functionality in Terraform. - -This section of the documentation gives a high-level overview of how -to write plugins for Terraform. 
It does not hold your hand through the -process, however, and expects a relatively high level of understanding -of Go, provider semantics, Unix, etc. - -~> **Advanced topic!** Plugin development is a highly advanced -topic in Terraform, and is not required knowledge for day-to-day usage. -If you don't plan on writing any plugins, we recommend not reading -this section of the documentation. diff --git a/website/docs/plugins/provider.html.md b/website/docs/plugins/provider.html.md deleted file mode 100644 index 12a478130..000000000 --- a/website/docs/plugins/provider.html.md +++ /dev/null @@ -1,309 +0,0 @@ ---- -layout: "docs" -page_title: "Provider Plugins" -sidebar_current: "docs-plugins-provider" -description: |- - A provider in Terraform is responsible for the lifecycle of a resource: create, read, update, delete. An example of a provider is AWS, which can manage resources of type `aws_instance`, `aws_eip`, `aws_elb`, etc. ---- - -# Provider Plugins - -~> **Advanced topic!** Plugin development is a highly advanced -topic in Terraform, and is not required knowledge for day-to-day usage. -If you don't plan on writing any plugins, this section of the documentation is -not necessary to read. For general use of Terraform, please see -[Intro to Terraform](/intro/index.html) or the -[Terraform: Get Started](https://learn.hashicorp.com/collections/terraform/aws-get-started?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) -collection on HashiCorp Learn. - -> **Hands-on:** Try the [Call APIs with Terraform Providers](https://learn.hashicorp.com/collections/terraform/providers?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn. - -A provider in Terraform is responsible for the lifecycle of a resource: -create, read, update, delete. An example of a provider is AWS, which -can manage resources of type `aws_instance`, `aws_eip`, `aws_elb`, etc. 
- -The primary reasons to care about provider plugins are: - - * You want to add a new resource type to an existing provider. - - * You want to write a completely new provider for managing resource - types in a system not yet supported. - - * You want to write a completely new provider for custom, internal - systems such as a private inventory management system. - -If you're interested in provider development, then read on. The remainder -of this page will assume you're familiar with -[plugin basics](/docs/plugins/basics.html) and that you already have -a basic development environment setup. - -## Provider Plugin Codebases - -Provider plugins live outside of the Terraform core codebase in their own -source code repositories, and are typically published in a provider registry -such as [the public Terraform Registry](https://registry.terraform.io/). - -When developing a provider plugin, it is recommended to use a common `GOPATH` -that includes both the core Terraform repository and the repositories of any -providers being changed. This makes it easier to use a locally-built -`terraform` executable and a set of locally-built provider plugins together -without further configuration. - -For example, to download both Terraform and the `template` provider into -`GOPATH`: - -``` -$ go get github.com/hashicorp/terraform -$ go get github.com/terraform-providers/terraform-provider-template -``` - -These two packages are both "main" packages that can be built into separate -executables with `go install`: - -``` -$ go install github.com/hashicorp/terraform -$ go install github.com/terraform-providers/terraform-provider-template -``` - -After running the above commands, both Terraform core and the `template` -provider will both be installed in the current `GOPATH` and `$GOPATH/bin` -will contain both `terraform` and `terraform-provider-template` executables. 
-This `terraform` executable will find and use the `template` provider plugin -alongside it in the `bin` directory in preference to downloading and installing -an official release. - -When constructing a new provider from scratch, it's recommended to follow -a similar repository structure as for the existing providers, with the main -package in the repository root and a library package in a subdirectory named -after the provider. For more information, see -[Writing Custom Providers](/docs/extend/writing-custom-providers.html) in the -[Extending Terraform section](/docs/extend/index.html). - -When making changes only to files within the provider repository, it is _not_ -necessary to re-build the main Terraform executable. Note that some packages -from the Terraform repository are used as library dependencies by providers, -such as `github.com/hashicorp/terraform/helper/schema`; it is recommended to -use `govendor` to create a local vendor copy of the relevant packages in the -provider repository, as can be seen in the repositories within the -`terraform-providers` GitHub organization. - -## Low-Level Interface - -The interface you must implement for providers is -[ResourceProvider](https://github.com/hashicorp/terraform/blob/master/terraform/resource_provider.go). - -This interface is extremely low level, however, and we don't recommend -you implement it directly. Implementing the interface directly is error -prone, complicated, and difficult. - -Instead, we've developed some higher level libraries to help you out -with developing providers. These are the same libraries we use in our -own core providers. - -## helper/schema - -The `helper/schema` package in the plugin SDK is a framework designed to allow -building providers at a higher level of abstraction than the raw plugin protocol -that Terraform expects. This is the same library we've used to build most -of the official providers. 
- -For more information on `helper/schema`, see -[the `helper/schema` package reference documentation](https://pkg.go.dev/github.com/hashicorp/terraform-plugin-sdk/helper/schema). - -## Provider - -The first thing to do in your plugin is to create the -[schema.Provider](https://godoc.org/github.com/hashicorp/terraform/helper/schema#Provider) structure. -This structure implements the `ResourceProvider` interface. We -recommend creating this structure in a function to make testing easier -later. Example: - -```golang -func Provider() *schema.Provider { - return &schema.Provider{ - ... - } -} -``` - -Within the `schema.Provider`, you should initialize all the fields. They -are documented within the godoc, but a brief overview is here as well: - - * `Schema` - This is the configuration schema for the provider itself. - You should define any API keys, etc. here. Schemas are covered below. - - * `ResourcesMap` - The map of resources that this provider supports. - All keys are resource names and the values are the - [schema.Resource](https://godoc.org/github.com/hashicorp/terraform/helper/schema#Resource) structures implementing this resource. - - * `ConfigureFunc` - This function callback is used to configure the - provider. This function should do things such as initialize any API - clients, validate API keys, etc. The `interface{}` return value of - this function is the `meta` parameter that will be passed into all - resource [CRUD](https://en.wikipedia.org/wiki/Create,_read,_update_and_delete) - functions. In general, the returned value is a configuration structure - or a client. - -As part of the unit tests, you should call `InternalValidate`. This is used -to verify the structure of the provider and all of the resources, and reports -an error if it is invalid. 
An example test is shown below: - -```golang -func TestProvider(t *testing.T) { - if err := Provider().(*schema.Provider).InternalValidate(); err != nil { - t.Fatalf("err: %s", err) - } -} -``` - -Having this unit test will catch a lot of beginner mistakes as you build -your provider. - -## Resources - -Next, you'll want to create the resources that the provider can manage. -These resources are put into the `ResourcesMap` field of the provider -structure. Again, we recommend creating functions to instantiate these. -An example is shown below. - -```golang -func resourceComputeAddress() *schema.Resource { - return &schema.Resource { - ... - } -} -``` - -Resources are described using the -[schema.Resource](https://godoc.org/github.com/hashicorp/terraform/helper/schema#Resource) -structure. This structure has the following fields: - - * `Schema` - The configuration schema for this resource. Schemas are - covered in more detail below. - - * `Create`, `Read`, `Update`, and `Delete` - These are the callback - functions that implement CRUD operations for the resource. The only - optional field is `Update`. If your resource doesn't support update, then - you may keep that field nil. - - * `Importer` - If this is non-nil, then this resource is - [importable](/docs/import/importability.html). It is recommended to - implement this. - -The CRUD operations in more detail, along with their contracts: - - * `Create` - This is called to create a new instance of the resource. - Terraform guarantees that an existing ID is not set on the resource - data. That is, you're working with a new resource. Therefore, you are - responsible for calling `SetId` on your `schema.ResourceData` using a - value suitable for your resource. This ensures whatever resource - state you set on `schema.ResourceData` will be persisted in local state. - If you neglect to `SetId`, no resource state will be persisted. - - * `Read` - This is called to resync the local state with the remote state. 
- Terraform guarantees that an existing ID will be set. This ID should be - used to look up the resource. Any remote data should be updated into - the local data. **No changes to the remote resource are to be made.** - If the resource is no longer present, calling `SetId` - with an empty string will signal its removal. - - * `Update` - This is called to update properties of an existing resource. - Terraform guarantees that an existing ID will be set. Additionally, - the only changed attributes are guaranteed to be those that support - update, as specified by the schema. Be careful to read about partial - states below. - - * `Delete` - This is called to delete the resource. Terraform guarantees - an existing ID will be set. - - * `Exists` - This is called to verify a resource still exists. It is - called prior to `Read`, and lowers the burden of `Read` to be able - to assume the resource exists. `false` should be returned if - the resources is no longer present, which has the same effect - as calling `SetId("")` from `Read` (i.e. removal of the resource data - from state). - -## Schemas - -Both providers and resources require a schema to be specified. The schema -is used to define the structure of the configuration, the types, etc. It is -very important to get correct. - -In both provider and resource, the schema is a `map[string]*schema.Schema`. -The key of this map is the configuration key, and the value is a schema for -the value of that key. - -Schemas are incredibly powerful, so this documentation page won't attempt -to cover the full power of them. Instead, the API docs should be referenced -which cover all available settings. - -We recommend viewing schemas of existing or similar providers to learn -best practices. A good starting place is the -[core Terraform providers](https://github.com/terraform-providers). 
- -## Resource Data - -The parameter to provider configuration as well as all the CRUD operations -on a resource is a -[schema.ResourceData](https://godoc.org/github.com/hashicorp/terraform/helper/schema#ResourceData). -This structure is used to query configurations as well as to set information -about the resource such as its ID, connection information, and computed -attributes. - -The API documentation covers ResourceData well, as well as the core providers -in Terraform. - -**Partial state** deserves a special mention. Occasionally in Terraform, create or -update operations are not atomic; they can fail halfway through. As an example, -when creating an AWS security group, creating the group may succeed, -but creating all the initial rules may fail. In this case, it is incredibly -important that Terraform record the correct _partial state_ so that a -subsequent `terraform apply` fixes this resource. - -Most of the time, partial state is not required. When it is, it must be -specifically enabled. An example is shown below: - -```golang -func resourceUpdate(d *schema.ResourceData, meta interface{}) error { - // Enable partial state mode - d.Partial(true) - - if d.HasChange("tags") { - // If an error occurs, return with an error, - // we didn't finish updating - if err := updateTags(d, meta); err != nil { - return err - } - - d.SetPartial("tags") - } - - if d.HasChange("name") { - if err := updateName(d, meta); err != nil { - return err - } - - d.SetPartial("name") - } - - // We succeeded, disable partial mode - d.Partial(false) - - return nil -} -``` - -In the example above, it is possible that setting the `tags` succeeds, -but setting the `name` fails. In this scenario, we want to make sure -that only the state of the `tags` is updated. To do this the -`Partial` and `SetPartial` functions are used. - -`Partial` toggles partial-state mode. When disabled, all changes are merged -into the state upon result of the operation. 
When enabled, only changes -enabled with `SetPartial` are merged in. - -`SetPartial` tells Terraform what state changes to adopt upon completion -of an operation. You should call `SetPartial` with every key that is safe -to merge into the state. The parameter to `SetPartial` is a prefix, so -if you have a nested structure and want to accept the whole thing, -you can just specify the prefix. diff --git a/website/docs/providers/index.html.markdown b/website/docs/providers/index.html.markdown index 8ab5cf294..2656c9784 100644 --- a/website/docs/providers/index.html.markdown +++ b/website/docs/providers/index.html.markdown @@ -1,223 +1,44 @@ --- -layout: "docs" -page_title: "Providers" +layout: "language" +page_title: "Provider Documentation" sidebar_current: "docs-providers" description: |- - Terraform is used to create, manage, and manipulate infrastructure resources. Examples of resources include physical machines, VMs, network switches, containers, etc. Almost any infrastructure noun can be represented as a resource in Terraform. + Terraform's resources are implemented by provider plugins. The Terraform + Registry is the main directory of publicly available Terraform providers. --- -# Providers - -Terraform is used to create, manage, and update infrastructure resources such -as physical machines, VMs, network switches, containers, and more. Almost any -infrastructure type can be represented as a resource in Terraform. - -A provider is responsible for understanding API interactions and exposing -resources. Most providers configure a specific infrastructure platform (either -cloud or self-hosted). Providers can also offer local utilities for tasks like -generating random numbers for unique resource names. - -## Providers in the Terraform Registry - -The [Terraform Registry](https://registry.terraform.io/browse/providers) -is the main directory of publicly available Terraform providers, and hosts -providers for most major infrastructure platforms. 
- -Once you've found a provider you want to use, you can require it in your -Terraform configuration and start using the resource types it provides. -Terraform can automatically install providers from the Terraform Registry when -you run `terraform init`. - -- To find providers for the infrastructure platforms you use, browse - [the providers section of the Terraform Registry](https://registry.terraform.io/browse/providers). -- For details about how to use providers in your Terraform configurations, see - [Provider Requirements](../configuration/provider-requirements.html) and - [Provider Configuration](../configuration/providers.html). - -### Provider Documentation +# Provider Documentation Every Terraform provider has its own documentation, describing its resource types and their arguments. -The Terraform Registry is also the main home for provider documentation. -When viewing a provider's page on the Terraform Registry, you can click the -"Documentation" link in the header to browse its documentation. Provider -documentation in the registry is versioned, and you can use the dropdown version -menu in the header to switch which version's documentation you are viewing. +The [Terraform Registry](https://registry.terraform.io/browse/providers) is the +main home for provider documentation. When viewing a provider's page on the +Terraform Registry, you can click the "Documentation" link in the header to +browse its documentation. -## Lists of Terraform Providers +Provider documentation in the Registry is versioned; you can use the version +menu in the header to change which version you're viewing. -Provider documentation used to be hosted directly on terraform.io, as part of -Terraform's core documentation. Although some provider documentation might still -be hosted here, the Terraform Registry is now the main home for all public -provider docs. 
(The exception is the built-in -[`terraform` provider](/docs/providers/terraform/index.html) for reading state -data, since it is not available on the Terraform Registry.) +Learn more about writing, generating, and rendering provider documentation +in the [provider publishing documentation](/docs/registry/providers/docs.html). -As part of the old provider documentation, this section of the site included -categorized lists of all of the providers that could be automatically installed -by older versions of Terraform, plus a supplemental list of community providers -that needed to be manually installed. Many of these providers have already moved -to the Terraform Registry, but we will continue to host these lists for a while -as part of the transition. Links to provider documentation URLs on terraform.io -should still work, but will now redirect to the equivalent page in the Terraform -Registry. +## Temporary Provider Documentation -Use the navigation to the left to browse the categorized lists, or see the main -list of historical providers below. +The following providers will be published on the Terraform Registry soon, but +aren't quite ready. Until they're published, their documentation is available at +the links below: -
- - -- [ACME](/docs/providers/acme/index.html) -- [Akamai](/docs/providers/akamai/index.html) -- [Alibaba Cloud](/docs/providers/alicloud/index.html) -- [Archive](/docs/providers/archive/index.html) -- [Arukas](/docs/providers/arukas/index.html) -- [Auth0](/docs/providers/auth0/index.html) - [Avi Vantage](/docs/providers/avi/index.html) -- [Aviatrix](/docs/providers/aviatrix/index.html) -- [AWS](/docs/providers/aws/index.html) -- [Azure](/docs/providers/azurerm/index.html) -- [Azure Active Directory](/docs/providers/azuread/index.html) -- [Azure DevOps](/docs/providers/azuredevops/index.html) -- [Azure Stack](/docs/providers/azurestack/index.html) -- [A10 Networks](/docs/providers/vthunder/index.html) -- [BaiduCloud](/docs/providers/baiducloud/index.html) -- [Bitbucket](/docs/providers/bitbucket/index.html) -- [Brightbox](/docs/providers/brightbox/index.html) -- [CenturyLinkCloud](/docs/providers/clc/index.html) -- [Check Point](/docs/providers/checkpoint/index.html) - [Chef](/docs/providers/chef/index.html) -- [CherryServers](/docs/providers/cherryservers/index.html) -- [Circonus](/docs/providers/circonus/index.html) -- [Cisco ASA](/docs/providers/ciscoasa/index.html) -- [Cisco ACI](/docs/providers/aci/index.html) -- [Cisco MSO](/docs/providers/mso/index.html) -- [CloudAMQP](/docs/providers/cloudamqp/index.html) -- [Cloudflare](/docs/providers/cloudflare/index.html) -- [Cloud-init](/docs/providers/cloudinit/index.html) -- [CloudScale.ch](/docs/providers/cloudscale/index.html) -- [CloudStack](/docs/providers/cloudstack/index.html) - [Cobbler](/docs/providers/cobbler/index.html) -- [Cohesity](/docs/providers/cohesity/index.html) -- [Constellix](/docs/providers/constellix/index.html) -- [Consul](/docs/providers/consul/index.html) -- [Datadog](/docs/providers/datadog/index.html) -- [DigitalOcean](/docs/providers/do/index.html) -- [DNS](/docs/providers/dns/index.html) -- [DNSimple](/docs/providers/dnsimple/index.html) -- [DNSMadeEasy](/docs/providers/dme/index.html) -- 
[Docker](/docs/providers/docker/index.html) -- [Dome9](/docs/providers/dome9/index.html) -- [Dyn](/docs/providers/dyn/index.html) -- [EnterpriseCloud](/docs/providers/ecl/index.html) -- [Exoscale](/docs/providers/exoscale/index.html) -- [External](/docs/providers/external/index.html) -- [F5 BIG-IP](/docs/providers/bigip/index.html) -- [Fastly](/docs/providers/fastly/index.html) -- [FlexibleEngine](/docs/providers/flexibleengine/index.html) -- [FortiOS](/docs/providers/fortios/index.html) - [Genymotion](/docs/providers/genymotion/index.html) -- [GitHub](/docs/providers/github/index.html) -- [GitLab](/docs/providers/gitlab/index.html) -- [Google Cloud Platform](/docs/providers/google/index.html) -- [Grafana](/docs/providers/grafana/index.html) -- [Gridscale](/docs/providers/gridscale) -- [Hedvig](/docs/providers/hedvig/index.html) -- [Helm](/docs/providers/helm/index.html) -- [Heroku](/docs/providers/heroku/index.html) -- [Hetzner Cloud](/docs/providers/hcloud/index.html) -- [HTTP](/docs/providers/http/index.html) -- [HuaweiCloud](/docs/providers/huaweicloud/index.html) -- [HuaweiCloudStack](/docs/providers/huaweicloudstack/index.html) -- [Icinga2](/docs/providers/icinga2/index.html) -- [Ignition](/docs/providers/ignition/index.html) -- [Incapsula](/docs/providers/incapsula/index.html) -- [InfluxDB](/docs/providers/influxdb/index.html) - [Infoblox](/docs/providers/infoblox/index.html) -- [JDCloud](/docs/providers/jdcloud/index.html) -- [KingsoftCloud](/docs/providers/ksyun/index.html) -- [Kubernetes](/docs/providers/kubernetes/index.html) -- [Lacework](/docs/providers/lacework/index.html) -- [LaunchDarkly](/docs/providers/launchdarkly/index.html) -- [Librato](/docs/providers/librato/index.html) -- [Linode](/docs/providers/linode/index.html) -- [Local](/docs/providers/local/index.html) -- [Logentries](/docs/providers/logentries/index.html) -- [LogicMonitor](/docs/providers/logicmonitor/index.html) -- [Mailgun](/docs/providers/mailgun/index.html) -- 
[MetalCloud](/docs/providers/metalcloud/index.html) -- [MongoDB Atlas](/docs/providers/mongodbatlas/index.html) - [MySQL](/docs/providers/mysql/index.html) -- [Naver Cloud](/docs/providers/ncloud/index.html) -- [Netlify](/docs/providers/netlify/index.html) -- [New Relic](https://registry.terraform.io/providers/newrelic/newrelic/latest/docs) -- [Nomad](/docs/providers/nomad/index.html) -- [NS1](/docs/providers/ns1/index.html) -- [Null](https://registry.terraform.io/providers/hashicorp/null/latest/docs) -- [Nutanix](/docs/providers/nutanix/index.html) -- [1&1](/docs/providers/oneandone/index.html) -- [Okta](/docs/providers/okta/index.html) -- [Okta Advanced Server Access](/docs/providers/oktaasa/index.html) -- [OpenNebula](/docs/providers/opennebula/index.html) -- [OpenStack](/docs/providers/openstack/index.html) -- [OpenTelekomCloud](/docs/providers/opentelekomcloud/index.html) -- [OpsGenie](/docs/providers/opsgenie/index.html) -- [Oracle Cloud Infrastructure](/docs/providers/oci/index.html) -- [Oracle Cloud Platform](/docs/providers/oraclepaas/index.html) -- [Oracle Public Cloud](/docs/providers/opc/index.html) -- [OVH](/docs/providers/ovh/index.html) -- [Packet](/docs/providers/packet/index.html) -- [PagerDuty](/docs/providers/pagerduty/index.html) -- [Palo Alto Networks PANOS](/docs/providers/panos/index.html) -- [Palo Alto Networks PrismaCloud](/docs/providers/prismacloud/index.html) -- [PostgreSQL](/docs/providers/postgresql/index.html) -- [PowerDNS](/docs/providers/powerdns/index.html) -- [ProfitBricks](/docs/providers/profitbricks/index.html) -- [Pureport](/docs/providers/pureport/index.html) -- [RabbitMQ](/docs/providers/rabbitmq/index.html) -- [Rancher](/docs/providers/rancher/index.html) -- [Rancher2](/docs/providers/rancher2/index.html) -- [Random](https://registry.terraform.io/providers/hashicorp/random/latest/docs) -- [RightScale](/docs/providers/rightscale/index.html) - [Rubrik](/docs/providers/rubrik/index.html) - 
[Rundeck](/docs/providers/rundeck/index.html) -- [RunScope](/docs/providers/runscope/index.html) -- [Scaleway](/docs/providers/scaleway/index.html) -- [Selectel](/docs/providers/selectel/index.html) -- [SignalFx](/docs/providers/signalfx/index.html) -- [Skytap](/docs/providers/skytap/index.html) -- [SoftLayer](/docs/providers/softlayer/index.html) -- [Spotinst](/docs/providers/spotinst/index.html) -- [StackPath](/docs/providers/stackpath/index.html) -- [StatusCake](/docs/providers/statuscake/index.html) -- [Sumo Logic](/docs/providers/sumologic/index.html) -- [TelefonicaOpenCloud](/docs/providers/telefonicaopencloud/index.html) -- [Template](/docs/providers/template/index.html) -- [TencentCloud](/docs/providers/tencentcloud/index.html) -- [Terraform](/docs/providers/terraform/index.html) -- [Terraform Cloud](/docs/providers/tfe/index.html) -- [Time](/docs/providers/time/index.html) -- [TLS](/docs/providers/tls/index.html) -- [Triton](/docs/providers/triton/index.html) -- [Turbot](/docs/providers/turbot/index.html) -- [UCloud](/docs/providers/ucloud/index.html) -- [UltraDNS](/docs/providers/ultradns/index.html) -- [Vault](/docs/providers/vault/index.html) -- [Venafi](/docs/providers/venafi/index.html) -- [VMware Cloud](/docs/providers/vmc/index.html) -- [VMware NSX-T](/docs/providers/nsxt/index.html) -- [VMware vCloud Director](/docs/providers/vcd/index.html) -- [VMware vRA7](/docs/providers/vra7/index.html) -- [VMware vSphere](/docs/providers/vsphere/index.html) -- [Vultr](/docs/providers/vultr/index.html) -- [Wavefront](/docs/providers/wavefront/index.html) -- [Yandex](/docs/providers/yandex/index.html) +## Useful tools -
- ------ - -More providers can be found on our [Community Providers](/docs/providers/type/community-index.html) page. +- [Doc preview tool](https://registry.terraform.io/tools/doc-preview) +- [terraform-plugin-docs](https://github.com/hashicorp/terraform-plugin-docs) diff --git a/website/docs/providers/terraform/d/remote_state.html.md b/website/docs/providers/terraform/d/remote_state.html.md deleted file mode 100644 index 96cfdc4f6..000000000 --- a/website/docs/providers/terraform/d/remote_state.html.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -layout: "terraform" -page_title: "Terraform: terraform_remote_state" -sidebar_current: "docs-terraform-datasource-remote-state" -description: |- - Accesses state meta data from a remote backend. ---- - -# remote_state - -[backends]: /docs/backends/index.html - -Retrieves state data from a [Terraform backend][backends]. This allows you to -use the root-level outputs of one or more Terraform configurations as input data -for another configuration. - -Although this data source uses Terraform's [backends][], it doesn't have the -same limitations as the main backend configuration. You can use any number of -`remote_state` data sources with differently configured backends, and you can -use interpolations when configuring them. - -## Example Usage (`remote` Backend) - -```hcl -data "terraform_remote_state" "vpc" { - backend = "remote" - - config = { - organization = "hashicorp" - workspaces = { - name = "vpc-prod" - } - } -} - -# Terraform >= 0.12 -resource "aws_instance" "foo" { - # ... - subnet_id = data.terraform_remote_state.vpc.outputs.subnet_id -} - -# Terraform <= 0.11 -resource "aws_instance" "foo" { - # ... - subnet_id = "${data.terraform_remote_state.vpc.subnet_id}" -} -``` - -## Example Usage (`local` Backend) - -```hcl -data "terraform_remote_state" "vpc" { - backend = "local" - - config = { - path = "..." - } -} - -# Terraform >= 0.12 -resource "aws_instance" "foo" { - # ... 
- subnet_id = data.terraform_remote_state.vpc.outputs.subnet_id -} - -# Terraform <= 0.11 -resource "aws_instance" "foo" { - # ... - subnet_id = "${data.terraform_remote_state.vpc.subnet_id}" -} -``` - -## Argument Reference - -The following arguments are supported: - -* `backend` - (Required) The remote backend to use. -* `workspace` - (Optional) The Terraform workspace to use, if the backend - supports workspaces. -* `config` - (Optional; object) The configuration of the remote backend. - Although this argument is listed as optional, most backends require - some configuration. - - The `config` object can use any arguments that would be valid in the - equivalent `terraform { backend "" { ... } }` block. See - [the documentation of your chosen backend](/docs/backends/types/index.html) - for details. - - -> **Note:** If the backend configuration requires a nested block, specify - it here as a normal attribute with an object value. (For example, - `workspaces = { ... }` instead of `workspaces { ... }`.) -* `defaults` - (Optional; object) Default values for outputs, in case the state - file is empty or lacks a required output. - -## Attributes Reference - -In addition to the above, the following attributes are exported: - -* (v0.12+) `outputs` - An object containing every root-level - [output](/docs/configuration/outputs.html) in the remote state. -* (<= v0.11) `` - Each root-level [output](/docs/configuration/outputs.html) - in the remote state appears as a top level attribute on the data source. - -## Root Outputs Only - -Only the root-level outputs from the remote state are accessible. Outputs from -modules within the state cannot be accessed. If you want a module output or a -resource attribute to be accessible via a remote state, you must thread the -output through to a root output. - -For example: - -```hcl -module "app" { - source = "..." 
-} - -output "app_value" { - value = "${module.app.value}" -} -``` - -In this example, the output `value` from the "app" module is available as -`app_value`. If this root level output hadn't been created, then a remote state -resource wouldn't be able to access the `value` output on the module. diff --git a/website/docs/providers/terraform/index.html.markdown b/website/docs/providers/terraform/index.html.markdown deleted file mode 100644 index 73f47735e..000000000 --- a/website/docs/providers/terraform/index.html.markdown +++ /dev/null @@ -1,42 +0,0 @@ ---- -layout: "terraform" -page_title: "Provider: Terraform" -sidebar_current: "docs-terraform-index" -description: |- - The Terraform provider is used to access meta data from shared infrastructure. ---- - -# Terraform Provider - -The terraform provider provides access to outputs from the Terraform state -of shared infrastructure. - -Use the navigation to the left to read about the available data sources. - -## Example Usage - -```hcl -# Shared infrastructure state stored in Atlas -data "terraform_remote_state" "vpc" { - backend = "remote" - - config = { - organization = "hashicorp" - workspaces = { - name = "vpc-prod" - } - } -} - -# Terraform >= 0.12 -resource "aws_instance" "foo" { - # ... - subnet_id = data.terraform_remote_state.vpc.outputs.subnet_id -} - -# Terraform <= 0.11 -resource "aws_instance" "foo" { - # ... - subnet_id = "${data.terraform_remote_state.vpc.subnet_id}" -} -``` diff --git a/website/docs/providers/type/cloud-index.html.markdown b/website/docs/providers/type/cloud-index.html.markdown deleted file mode 100644 index 7e2416432..000000000 --- a/website/docs/providers/type/cloud-index.html.markdown +++ /dev/null @@ -1,62 +0,0 @@ ---- -layout: "docs" -page_title: "Cloud Providers" -sidebar_current: "docs-providers-cloud" -description: |- - Category for standard cloud vendors. 
---- - -#Cloud Providers - -This group includes cloud providers offering a range of services including IaaS, -SaaS, and PaaS offerings. This group of cloud providers includes some smaller -scale clouds or ones with more specialized offerings. The Terraform provider -and associated resources for these clouds are primarily supported by the cloud -vendor in close collaboration with HashiCorp, and are tested by HashiCorp. - ---- - - -- [Arukas](/docs/providers/arukas/index.html) -- [BaiduCloud](/docs/providers/baiducloud/index.html) -- [Brightbox](/docs/providers/brightbox/index.html) -- [CenturyLinkCloud](/docs/providers/clc/index.html) -- [CherryServers](/docs/providers/cherryservers/index.html) -- [Cisco ACI](/docs/providers/aci/index.html) -- [CloudScale.ch](/docs/providers/cloudscale/index.html) -- [CloudStack](/docs/providers/cloudstack/index.html) -- [DigitalOcean](/docs/providers/do/index.html) -- [EnterpriseCloud](/docs/providers/ecl/index.html) -- [Exoscale](/docs/providers/exoscale/index.html) -- [Fastly](/docs/providers/fastly/index.html) -- [FlexibleEngine](/docs/providers/flexibleengine/index.html) -- [Gridscale](/docs/providers/gridscale/index.html) -- [Hedvig](/docs/providers/hedvig/index.html) -- [Heroku](/docs/providers/heroku/index.html) -- [Hetzner Cloud](/docs/providers/hcloud/index.html) -- [HuaweiCloud](/docs/providers/huaweicloud/index.html) -- [HuaweiCloudStack](/docs/providers/huaweicloudstack/index.html) -- [JDCloud](/docs/providers/jdcloud/index.html) -- [KingsoftCloud](/docs/providers/ksyun/index.html) -- [Linode](/docs/providers/linode/index.html) -- [MetalCloud](/docs/providers/metalcloud/index.html) -- [Naver Cloud](/docs/providers/ncloud/index.html) -- [Nutanix](/docs/providers/nutanix/index.html) -- [OpenNebula](/docs/providers/opennebula/index.html) -- [OpenStack](/docs/providers/openstack/index.html) -- [OpenTelekomCloud](/docs/providers/opentelekomcloud/index.html) -- [OVH](/docs/providers/ovh/index.html) -- 
[Packet](/docs/providers/packet/index.html) -- [ProfitBricks](/docs/providers/profitbricks/index.html) -- [Scaleway](/docs/providers/scaleway/index.html) -- [Skytap](/docs/providers/skytap/index.html) -- [Selectel](/docs/providers/selectel/index.html) -- [SoftLayer](/docs/providers/softlayer/index.html) -- [StackPath](/docs/providers/stackpath/index.html) -- [TelefonicaOpenCloud](/docs/providers/telefonicaopencloud/index.html) -- [TencentCloud](/docs/providers/tencentcloud/index.html) -- [Triton](/docs/providers/triton/index.html) -- [UCloud](/docs/providers/ucloud/index.html) -- [Vultr](/docs/providers/vultr/index.html) -- [Yandex.Cloud](/docs/providers/yandex/index.html) -- [1&1](/docs/providers/oneandone/index.html) diff --git a/website/docs/providers/type/community-index.html.markdown b/website/docs/providers/type/community-index.html.markdown deleted file mode 100644 index 2c498ed27..000000000 --- a/website/docs/providers/type/community-index.html.markdown +++ /dev/null @@ -1,15 +0,0 @@ ---- -layout: "docs" -page_title: "Community Providers" -sidebar_current: "docs-providers-community" -description: |- - Category for community-built providers. ---- - -# Community Providers - -Discovering and sharing Terraform providers is now available directly in the Terraform Registry. Please visit [registry.terraform.io](https://registry.terraform.io/browse/providers) to get started. - --> **Note:** Use the “community” filter on the left to view providers published and maintained by community members. - -If you have created a new provider and would like to share it on the Registry, please see our [publishing instructions](https://www.terraform.io/docs/registry/providers/publishing.html) to learn how you can easily share it to other Terraform users. 
diff --git a/website/docs/providers/type/database-index.html.markdown b/website/docs/providers/type/database-index.html.markdown deleted file mode 100644 index 7a676c6d6..000000000 --- a/website/docs/providers/type/database-index.html.markdown +++ /dev/null @@ -1,23 +0,0 @@ ---- -layout: "docs" -page_title: "Database Providers" -sidebar_current: "docs-providers-database" -description: |- - Category for database vendors. ---- - -# Database Providers - -This is a group of database providers offer specific capabilities to provision -and configure your database resources. Terraform integrates with these -database services using the specific provider to provision and manages database -resources. These providers are primarily supported by the vendor in close -collaboration with HashiCorp, and are tested by HashiCorp. - ---- - - -- [InfluxDB](/docs/providers/influxdb/index.html) -- [MongoDB Atlas](/docs/providers/mongodbatlas/index.html) -- [MySQL](/docs/providers/mysql/index.html) -- [PostgreSQL](/docs/providers/postgresql/index.html) diff --git a/website/docs/providers/type/infra-index.html.markdown b/website/docs/providers/type/infra-index.html.markdown deleted file mode 100644 index 43989a3db..000000000 --- a/website/docs/providers/type/infra-index.html.markdown +++ /dev/null @@ -1,44 +0,0 @@ ---- -layout: "docs" -page_title: "Infrastructure Software Providers" -sidebar_current: "docs-providers-infra" -description: |- - Category for infrastructure management vendors. ---- - -# Infrastructure Software Providers - -This is a group of software providers offering specialized infrastructure -management capabilities such as configuration management. Terraform integrates -with these tools using the specific providers to enable these specialized tools -to execute tasks during the provisioning of infrastructure. These providers -are primarily supported by the vendor in close collaboration with HashiCorp, -and are tested by HashiCorp. 
- ---- - -- [Chef](/docs/providers/chef/index.html) -- [CloudAMQP](/docs/providers/cloudamqp/index.html) -- [Cohesity](/docs/providers/cohesity/index.html) -- [Consul](/docs/providers/consul/index.html) -- [Docker](/docs/providers/docker/index.html) -- [Dome9](/docs/providers/dome9/index.html) -- [Helm](/docs/providers/helm/index.html) -- [Kubernetes](/docs/providers/kubernetes/index.html) -- [Lacework](/docs/providers/lacework/index.html) -- [Mailgun](/docs/providers/mailgun/index.html) -- [Nomad](/docs/providers/nomad/index.html) -- [Okta](/docs/providers/okta/index.html) -- [Okta Advanced Server Access](/docs/providers/oktaasa/index.html) -- [RabbitMQ](/docs/providers/rabbitmq/index.html) -- [Rancher](/docs/providers/rancher/index.html) -- [Rancher2](/docs/providers/rancher2/index.html) -- [RightScale](/docs/providers/rightscale/index.html) -- [Rubrik](/docs/providers/rubrik/index.html) -- [Rundeck](/docs/providers/rundeck/index.html) -- [Spotinst](/docs/providers/spotinst/index.html) -- [Terraform](/docs/providers/terraform/index.html) -- [Terraform Cloud](/docs/providers/tfe/index.html) -- [Turbot](/docs/providers/turbot/index.html) -- [Vault](/docs/providers/vault/index.html) -- [Venafi](/docs/providers/venafi/index.html) diff --git a/website/docs/providers/type/major-index.html.markdown b/website/docs/providers/type/major-index.html.markdown deleted file mode 100644 index bde801276..000000000 --- a/website/docs/providers/type/major-index.html.markdown +++ /dev/null @@ -1,35 +0,0 @@ ---- -layout: "docs" -page_title: "Major Cloud Providers" -sidebar_current: "docs-providers-major" -description: |- - Category for major cloud vendors. ---- - -# Major Cloud Providers - -This group includes hyper-scale cloud providers that offer a range of services -including IaaS, SaaS, and PaaS. A large percentage of Terraform users provision -their infrastructure on these major cloud providers. 
HashiCorp closely partners -with these cloud providers to offer best-in-class integration to provision and -manage the majority of the services offered. These providers are primarily -supported by the cloud vendor in close collaboration with HashiCorp, and are -tested by HashiCorp. - ---- - - -- [Alibaba Cloud](/docs/providers/alicloud/index.html) -- [AWS](/docs/providers/aws/index.html) -- [Azure](/docs/providers/azurerm/index.html) -- [Azure DevOps](/docs/providers/azuredevops/index.html) -- [Azure Stack](/docs/providers/azurestack/index.html) -- [Google Cloud Platform](/docs/providers/google/index.html) -- [Oracle Cloud Infrastructure](/docs/providers/oci/index.html) -- [Oracle Cloud Platform](/docs/providers/oraclepaas/index.html) -- [Oracle Public Cloud](/docs/providers/opc/index.html) -- [VMware Cloud](/docs/providers/vmc/index.html) -- [VMware NSX-T](/docs/providers/nsxt/index.html) -- [vCloud Director](/docs/providers/vcd/index.html) -- [VMware vRA7](/docs/providers/vra7/index.html) -- [VMware vSphere](/docs/providers/vsphere/index.html) diff --git a/website/docs/providers/type/misc-index.html.markdown b/website/docs/providers/type/misc-index.html.markdown deleted file mode 100644 index 1efc87d59..000000000 --- a/website/docs/providers/type/misc-index.html.markdown +++ /dev/null @@ -1,29 +0,0 @@ ---- -layout: "docs" -page_title: "Misc Providers" -sidebar_current: "docs-providers-misc" -description: |- - Category for miscellaneous vendors. ---- - -# Miscellaneous Providers - -This is a group of miscellaneous providers offer specific capabilities that can -be useful when working with Terraform. These providers are primarily supported -by the vendors and the Terraform community, and are tested by HashiCorp. 
- ---- - -- [ACME](/docs/providers/acme/index.html) -- [Archive](/docs/providers/archive/index.html) -- [Cobbler](/docs/providers/cobbler/index.html) -- [External](/docs/providers/external/index.html) -- [Genymotion](/docs/providers/genymotion/index.html) -- [Ignition](/docs/providers/ignition/index.html) -- [Local](/docs/providers/local/index.html) -- [Netlify](/docs/providers/netlify/index.html) -- [Null](/docs/providers/null/index.html) -- [Random](/docs/providers/random/index.html) -- [Template](/docs/providers/template/index.html) -- [TLS](/docs/providers/tls/index.html) -- [Time](/docs/providers/time/index.html) diff --git a/website/docs/providers/type/monitor-index.html.markdown b/website/docs/providers/type/monitor-index.html.markdown deleted file mode 100644 index d28efc769..000000000 --- a/website/docs/providers/type/monitor-index.html.markdown +++ /dev/null @@ -1,39 +0,0 @@ ---- -layout: "docs" -page_title: "Monitor & Sys Management Providers" -sidebar_current: "docs-providers-monitor" -description: |- - Category for monitoring and system management vendors. ---- - -# Monitoring & System Management Providers - -This is a group of monitoring & system management providers that offer the -capability to configure and manage services such as loggers, metric tools, -and monitoring services. Terraform integrates with these services using the -specific provider to enable these specialized monitoring capabilities. These -providers are primarily supported by the vendor in close collaboration with -HashiCorp, and are tested by HashiCorp. 
- - ---- - - -- [Auth0](/docs/providers/auth0/index.html) -- [Circonus](/docs/providers/circonus/index.html) -- [Datadog](/docs/providers/datadog/index.html) -- [Dyn](/docs/providers/dyn/index.html) -- [Grafana](/docs/providers/grafana/index.html) -- [Icinga2](/docs/providers/icinga2/index.html) -- [LaunchDarkly](/docs/providers/launchdarkly/index.html) -- [Librato](/docs/providers/librato/index.html) -- [Logentries](/docs/providers/logentries/index.html) -- [LogicMonitor](/docs/providers/logicmonitor/index.html) -- [New Relic](https://registry.terraform.io/providers/newrelic/newrelic/latest/docs) -- [OpsGenie](/docs/providers/opsgenie/index.html) -- [PagerDuty](/docs/providers/pagerduty/index.html) -- [Runscope](/docs/providers/runscope/index.html) -- [SignalFx](/docs/providers/signalfx/index.html) -- [StatusCake](/docs/providers/statuscake/index.html) -- [Sumo Logic](/docs/providers/sumologic/index.html) -- [Wavefront](/docs/providers/wavefront/index.html) diff --git a/website/docs/providers/type/network-index.html.markdown b/website/docs/providers/type/network-index.html.markdown deleted file mode 100644 index d035eb840..000000000 --- a/website/docs/providers/type/network-index.html.markdown +++ /dev/null @@ -1,42 +0,0 @@ ---- -layout: "docs" -page_title: "Network Providers" -sidebar_current: "docs-providers-network" -description: |- - Category for network vendors. ---- - -# Network Providers - -This is a group of network providers that offer specific network capabilities -such as DNS, routing, and firewall configuration. The providers generally -offer a cloud-based service and Terraform integrates with these services using -the specific providers. These providers are primarily supported by the vendor -in close collaboration with HashiCorp, and are tested by HashiCorp. 
- ---- - - -- [Akamai](/docs/providers/akamai/index.html) -- [Avi Vantage](/docs/providers/avi/index.html) -- [Aviatrix](/docs/providers/aviatrix/index.html) -- [A10 Networks](/docs/providers/vthunder/index.html) -- [Check Point](/docs/providers/checkpoint/index.html) -- [Cloudflare](/docs/providers/cloudflare/index.html) -- [Cisco ASA](/docs/providers/ciscoasa/index.html) -- [Cisco MSO](/docs/providers/mso/index.html) -- [Constellix](/docs/providers/constellix/index.html) -- [DNS](/docs/providers/dns/index.html) -- [DNSimple](/docs/providers/dnsimple/index.html) -- [DNSMadeEasy](/docs/providers/dme/index.html) -- [F5 BIG-IP](/docs/providers/bigip/index.html) -- [FortiOS](/docs/providers/fortios/index.html) -- [HTTP](/docs/providers/http/index.html) -- [Incapsula](/docs/providers/incapsula/index.html) -- [Infoblox](/docs/providers/infoblox/index.html) -- [NS1](/docs/providers/ns1/index.html) -- [Palo Alto Networks PANOS](/docs/providers/panos/index.html) -- [Palo Alto Networks Prisma Cloud](/docs/providers/prismacloud/index.html) -- [PowerDNS](/docs/providers/powerdns/index.html) -- [Pureport](/docs/providers/pureport/index.html) -- [UltraDNS](/docs/providers/ultradns/index.html) diff --git a/website/docs/providers/type/vcs-index.html.markdown b/website/docs/providers/type/vcs-index.html.markdown deleted file mode 100644 index 6c1f77a8d..000000000 --- a/website/docs/providers/type/vcs-index.html.markdown +++ /dev/null @@ -1,22 +0,0 @@ ---- -layout: "docs" -page_title: "VCS Providers" -sidebar_current: "docs-providers-vcs" -description: |- - Category for version control vendors. ---- - -# Version Control Providers - -This is a group of Version Control System (VCS) providers that offer -capabilities of using Terraform to manage your VCS projects, teams and -repositories. Terraform integrates with these services to create and manage -resources provided by the VCS. 
These providers are primarily supported by the -vendor in close collaboration with HashiCorp, and are tested by HashiCorp. - ---- - - -- [Bitbucket](/docs/providers/bitbucket/index.html) -- [GitHub](/docs/providers/github/index.html) -- [GitLab](/docs/providers/gitlab/index.html) diff --git a/website/docs/registry/api.html.md b/website/docs/registry/api.html.md deleted file mode 100644 index 326a68d8c..000000000 --- a/website/docs/registry/api.html.md +++ /dev/null @@ -1,771 +0,0 @@ ---- -layout: "registry" -page_title: "Terraform Registry - HTTP API" -sidebar_current: "docs-registry-api" -description: |- - The /api endpoints list modules according to some criteria. ---- - -# HTTP API - -When downloading modules from registry sources such as the public -[Terraform Registry](https://registry.terraform.io/), Terraform CLI expects -the given hostname to support -[the module registry protocol](/docs/internals/module-registry-protocol.html), -which is the minimal API required for Terraform CLI to successfully retrieve -a module. - -The public Terraform Registry and the private registry included in Terraform -Cloud and Terraform Enterprise implement a superset of that minimal module -registry API to support additional use-cases such as searching for modules -across the whole registry, retrieving documentation and schemas for modules, -and so on. - -This page describes the extended API implemented by the official module -registry implementations, and is aimed at those intending to build clients -to work with registry data. Third-party implementations of the registry -protocol are not required to implement these extensions. If you intend to -implement your own module registry, please refer to -[the module registry protocol](/docs/internals/module-registry-protocol.html) -instead. - -Terraform Registry also has some additional internal API endpoints used to -support its UI. Any endpoints or properties not documented on this page are -subject to change over time. 
- -## Service Discovery - -The hostname portion of a module source address is first passed to -[the service discovery protocol](/docs/internals/remote-service-discovery.html) -to determine if the given host has a module registry and, if so, the base -URL for its module registry endpoints. - -The service identifier for this protocol is `modules.v1`, and the declared -URL should always end with a slash such that the paths shown in the following -sections can be appended to it. - -For example, if discovery produces the URL `https://modules.example.com/v1/` -then this API would use full endpoint URLs like -`https://modules.example.com/v1/{namespace}/{name}/{provider}/versions`. - -A module source address with no hostname is a shorthand for an address -on `registry.terraform.io`. You can perform service discovery on that hostname -to find the public Terraform Registry's module API endpoints. - -## Base URL - -The example request URLs shown in this document are for the public [Terraform -Registry](https://registry.terraform.io), and use its API `` of -`https://registry.terraform.io/v1/modules/`. Note that although the base URL in -the [discovery document](#service-discovery) _may include_ a trailing slash, we -include a slash after the placeholder in the `Path`s below for clarity. - -## List Modules - -These endpoints list modules according to some criteria. - -| Method | Path | Produces | -| ------ | ------------------------------------- | -------------------------- | -| `GET` | `` | `application/json` | -| `GET` | `/:namespace` | `application/json` | - -### Parameters - -- `namespace` `(string: )` - Restricts listing to modules published by - this user or organization. This is optionally specified as part of the URL - path. - -### Query Parameters - -- `offset`, `limit` `(int: )` - See [Pagination](#pagination) for details. -- `provider` `(string: )` - Limits modules to a specific provider. 
-- `verified` `(bool: )` - If `true`, limits results to only verified - modules. Any other value including none returns all modules _including_ - verified ones. - -### Sample Request - -```text -$ curl 'https://registry.terraform.io/v1/modules?limit=2&verified=true' -``` - -### Sample Response - -```json -{ - "meta": { - "limit": 2, - "current_offset": 0, - "next_offset": 2, - "next_url": "/v1/modules?limit=2&offset=2&verified=true" - }, - "modules": [ - { - "id": "GoogleCloudPlatform/lb-http/google/1.0.4", - "owner": "", - "namespace": "GoogleCloudPlatform", - "name": "lb-http", - "version": "1.0.4", - "provider": "google", - "description": "Modular Global HTTP Load Balancer for GCE using forwarding rules.", - "source": "https://github.com/GoogleCloudPlatform/terraform-google-lb-http", - "published_at": "2017-10-17T01:22:17.792066Z", - "downloads": 213, - "verified": true - }, - { - "id": "terraform-aws-modules/vpc/aws/1.5.1", - "owner": "", - "namespace": "terraform-aws-modules", - "name": "vpc", - "version": "1.5.1", - "provider": "aws", - "description": "Terraform module which creates VPC resources on AWS", - "source": "https://github.com/terraform-aws-modules/terraform-aws-vpc", - "published_at": "2017-11-23T10:48:09.400166Z", - "downloads": 29714, - "verified": true - } - ] -} -``` - -## Search Modules - -This endpoint allows searching modules. - -| Method | Path | Produces | -| ------ | ------------------------------------- | -------------------------- | -| `GET` | `/search` | `application/json` | - -### Query Parameters - -- `q` `(string: )` - The search string. Search syntax understood - depends on registry implementation. The public registry supports basic keyword - or phrase searches. -- `offset`, `limit` `(int: )` - See [Pagination](#pagination) for details. -- `provider` `(string: )` - Limits results to a specific provider. -- `namespace` `(string: )` - Limits results to a specific namespace. 
-- `verified` `(bool: )` - If `true`, limits results to only verified - modules. Any other value including none returns all modules _including_ - verified ones. - -### Sample Request - -```text -$ curl 'https://registry.terraform.io/v1/modules/search?q=network&limit=2' -``` - -### Sample Response - -```json -{ - "meta": { - "limit": 2, - "current_offset": 0, - "next_offset": 2, - "next_url": "/v1/modules/search?limit=2&offset=2&q=network" - }, - "modules": [ - { - "id": "zoitech/network/aws/0.0.3", - "owner": "", - "namespace": "zoitech", - "name": "network", - "version": "0.0.3", - "provider": "aws", - "description": "This module is intended to be used for configuring an AWS network.", - "source": "https://github.com/zoitech/terraform-aws-network", - "published_at": "2017-11-23T15:12:06.620059Z", - "downloads": 39, - "verified": false - }, - { - "id": "Azure/network/azurerm/1.1.1", - "owner": "", - "namespace": "Azure", - "name": "network", - "version": "1.1.1", - "provider": "azurerm", - "description": "Terraform Azure RM Module for Network", - "source": "https://github.com/Azure/terraform-azurerm-network", - "published_at": "2017-11-22T17:15:34.325436Z", - "downloads": 1033, - "verified": true - } - ] -} -``` - -## List Available Versions for a Specific Module - -This is the primary endpoint for resolving module sources, returning the -available versions for a given fully-qualified module. - -| Method | Path | Produces | -| ------ | ------------------------------------- | -------------------------- | -| `GET` | `/:namespace/:name/:provider/versions` | `application/json` | - -### Parameters - -- `namespace` `(string: )` - The user or organization the module is - owned by. This is required and is specified as part of the URL path. - -- `name` `(string: )` - The name of the module. - This is required and is specified as part of the URL path. - -- `provider` `(string: )` - The name of the provider. - This is required and is specified as part of the URL path. 
- -### Sample Request - -```text -$ curl https://registry.terraform.io/v1/modules/hashicorp/consul/aws/versions -``` - -### Sample Response - -The `modules` array in the response always includes the requested module as the -first element. Other elements of this list, if present, are dependencies of the -requested module that are provided to potentially avoid additional requests to -resolve these modules. - -Additional modules are not required to be provided but, when present, can be -used by Terraform to optimize the module installation process. - -Each returned module has an array of available versions, which Terraform -matches against any version constraints given in configuration. - -```json -{ - "modules": [ - { - "source": "hashicorp/consul/aws", - "versions": [ - { - "version": "0.0.1", - "submodules" : [ - { - "path": "modules/consul-cluster", - "providers": [ - { - "name": "aws", - "version": "" - } - ], - "dependencies": [] - }, - { - "path": "modules/consul-security-group-rules", - "providers": [ - { - "name": "aws", - "version": "" - } - ], - "dependencies": [] - }, - { - "providers": [ - { - "name": "aws", - "version": "" - } - ], - "dependencies": [], - "path": "modules/consul-iam-policies" - } - ], - "root": { - "dependencies": [], - "providers": [ - { - "name": "template", - "version": "" - }, - { - "name": "aws", - "version": "" - } - ] - } - } - ] - } - ] -} -``` - -## Download Source Code for a Specific Module Version - -This endpoint downloads the specified version of a module for a single provider. - -A successful response has no body, and includes the location from which the module -version's source can be downloaded in the `X-Terraform-Get` header. Note that -this string may contain special syntax interpreted by Terraform via -[`go-getter`](https://github.com/hashicorp/go-getter). See the [`go-getter` -documentation](https://github.com/hashicorp/go-getter#url-format) for details. 
- -The value of `X-Terraform-Get` may instead be a relative URL, indicated by -beginning with `/`, `./` or `../`, in which case it is resolved relative to -the full URL of the download endpoint. - -| Method | Path | Produces | -| ------ | ---------------------------- | -------------------------- | -| `GET` | `/:namespace/:name/:provider/:version/download` | `application/json` | - -### Parameters - -- `namespace` `(string: )` - The user the module is owned by. - This is required and is specified as part of the URL path. - -- `name` `(string: )` - The name of the module. - This is required and is specified as part of the URL path. - -- `provider` `(string: )` - The name of the provider. - This is required and is specified as part of the URL path. - -- `version` `(string: )` - The version of the module. - This is required and is specified as part of the URL path. - -### Sample Request - -```text -$ curl -i \ - https://registry.terraform.io/v1/modules/hashicorp/consul/aws/0.0.1/download -``` - -### Sample Response - -```text -HTTP/1.1 204 No Content -Content-Length: 0 -X-Terraform-Get: https://api.github.com/repos/hashicorp/terraform-aws-consul/tarball/v0.0.1//*?archive=tar.gz -``` - -## List Latest Version of Module for All Providers - -This endpoint returns the latest version of each provider for a module. - -| Method | Path | Produces | -| ------ | ---------------------------- | -------------------------- | -| `GET` | `/:namespace/:name` | `application/json` | - -### Parameters - -- `namespace` `(string: )` - The user or organization the module is - owned by. This is required and is specified as part of the URL path. - -- `name` `(string: )` - The name of the module. - This is required and is specified as part of the URL path. - -### Query Parameters - -- `offset`, `limit` `(int: )` - See [Pagination](#pagination) for details. 
- -### Sample Request - -```text -$ curl \ - https://registry.terraform.io/v1/modules/hashicorp/consul -``` - -### Sample Response - -```json -{ - "meta": { - "limit": 15, - "current_offset": 0 - }, - "modules": [ - { - "id": "hashicorp/consul/azurerm/0.0.1", - "owner": "gruntwork-team", - "namespace": "hashicorp", - "name": "consul", - "version": "0.0.1", - "provider": "azurerm", - "description": "A Terraform Module for how to run Consul on AzureRM using Terraform and Packer", - "source": "https://github.com/hashicorp/terraform-azurerm-consul", - "published_at": "2017-09-14T23:22:59.923047Z", - "downloads": 100, - "verified": false - }, - { - "id": "hashicorp/consul/aws/0.0.1", - "owner": "gruntwork-team", - "namespace": "hashicorp", - "name": "consul", - "version": "0.0.1", - "provider": "aws", - "description": "A Terraform Module for how to run Consul on AWS using Terraform and Packer", - "source": "https://github.com/hashicorp/terraform-aws-consul", - "published_at": "2017-09-14T23:22:44.793647Z", - "downloads": 113, - "verified": false - } - ] -} -``` - -## Latest Version for a Specific Module Provider - -This endpoint returns the latest version of a module for a single provider. - -| Method | Path | Produces | -| ------ | ---------------------------- | -------------------------- | -| `GET` | `/:namespace/:name/:provider` | `application/json` | - -### Parameters - -- `namespace` `(string: )` - The user the module is owned by. - This is required and is specified as part of the URL path. - -- `name` `(string: )` - The name of the module. - This is required and is specified as part of the URL path. - -- `provider` `(string: )` - The name of the provider. - This is required and is specified as part of the URL path. - -### Sample Request - -```text -$ curl \ - https://registry.terraform.io/v1/modules/hashicorp/consul/aws -``` - -### Sample Response - -Note this response has has some fields trimmed for clarity. 
- -```json -{ - "id": "hashicorp/consul/aws/0.0.1", - "owner": "gruntwork-team", - "namespace": "hashicorp", - "name": "consul", - "version": "0.0.1", - "provider": "aws", - "description": "A Terraform Module for how to run Consul on AWS using Terraform and Packer", - "source": "https://github.com/hashicorp/terraform-aws-consul", - "published_at": "2017-09-14T23:22:44.793647Z", - "downloads": 113, - "verified": false, - "root": { - "path": "", - "readme": "# Consul AWS Module\n\nThis repo contains a Module for how to deploy a [Consul]...", - "empty": false, - "inputs": [ - { - "name": "ami_id", - "description": "The ID of the AMI to run in the cluster. ...", - "default": "\"\"" - }, - { - "name": "aws_region", - "description": "The AWS region to deploy into (e.g. us-east-1).", - "default": "\"us-east-1\"" - } - ], - "outputs": [ - { - "name": "num_servers", - "description": "" - }, - { - "name": "asg_name_servers", - "description": "" - } - ], - "dependencies": [], - "resources": [] - }, - "submodules": [ - { - "path": "modules/consul-cluster", - "readme": "# Consul Cluster\n\nThis folder contains a [Terraform](https://www.terraform.io/) ...", - "empty": false, - "inputs": [ - { - "name": "cluster_name", - "description": "The name of the Consul cluster (e.g. consul-stage). This variable is used to namespace all resources created by this module.", - "default": "" - }, - { - "name": "ami_id", - "description": "The ID of the AMI to run in this cluster. 
Should be an AMI that had Consul installed and configured by the install-consul module.", - "default": "" - } - ], - "outputs": [ - { - "name": "asg_name", - "description": "" - }, - { - "name": "cluster_size", - "description": "" - } - ], - "dependencies": [], - "resources": [ - { - "name": "autoscaling_group", - "type": "aws_autoscaling_group" - }, - { - "name": "launch_configuration", - "type": "aws_launch_configuration" - } - ] - } - ], - "providers": [ - "aws", - "azurerm" - ], - "versions": [ - "0.0.1" - ] -} -``` - -## Get a Specific Module - -This endpoint returns the specified version of a module for a single provider. - -| Method | Path | Produces | -| ------ | ---------------------------- | -------------------------- | -| `GET` | `/:namespace/:name/:provider/:version` | `application/json` | - -### Parameters - -- `namespace` `(string: )` - The user the module is owned by. - This is required and is specified as part of the URL path. - -- `name` `(string: )` - The name of the module. - This is required and is specified as part of the URL path. - -- `provider` `(string: )` - The name of the provider. - This is required and is specified as part of the URL path. - -- `version` `(string: )` - The version of the module. - This is required and is specified as part of the URL path. - -### Sample Request - -```text -$ curl \ - https://registry.terraform.io/v1/modules/hashicorp/consul/aws/0.0.1 -``` - -### Sample Response - -Note this response has has some fields trimmed for clarity. 
- - -```json -{ - "id": "hashicorp/consul/aws/0.0.1", - "owner": "gruntwork-team", - "namespace": "hashicorp", - "name": "consul", - "version": "0.0.1", - "provider": "aws", - "description": "A Terraform Module for how to run Consul on AWS using Terraform and Packer", - "source": "https://github.com/hashicorp/terraform-aws-consul", - "published_at": "2017-09-14T23:22:44.793647Z", - "downloads": 113, - "verified": false, - "root": { - "path": "", - "readme": "# Consul AWS Module\n\nThis repo contains a Module for how to deploy a [Consul]...", - "empty": false, - "inputs": [ - { - "name": "ami_id", - "description": "The ID of the AMI to run in the cluster. ...", - "default": "\"\"" - }, - { - "name": "aws_region", - "description": "The AWS region to deploy into (e.g. us-east-1).", - "default": "\"us-east-1\"" - } - ], - "outputs": [ - { - "name": "num_servers", - "description": "" - }, - { - "name": "asg_name_servers", - "description": "" - } - ], - "dependencies": [], - "resources": [] - }, - "submodules": [ - { - "path": "modules/consul-cluster", - "readme": "# Consul Cluster\n\nThis folder contains a [Terraform](https://www.terraform.io/) ...", - "empty": false, - "inputs": [ - { - "name": "cluster_name", - "description": "The name of the Consul cluster (e.g. consul-stage). This variable is used to namespace all resources created by this module.", - "default": "" - }, - { - "name": "ami_id", - "description": "The ID of the AMI to run in this cluster. 
Should be an AMI that had Consul installed and configured by the install-consul module.", - "default": "" - } - ], - "outputs": [ - { - "name": "asg_name", - "description": "" - }, - { - "name": "cluster_size", - "description": "" - } - ], - "dependencies": [], - "resources": [ - { - "name": "autoscaling_group", - "type": "aws_autoscaling_group" - }, - { - "name": "launch_configuration", - "type": "aws_launch_configuration" - } - ] - } - ], - "providers": [ - "aws", - "azurerm" - ], - "versions": [ - "0.0.1" - ] -} -``` - -## Download the Latest Version of a Module - -This endpoint downloads the latest version of a module for a single provider. - -It returns a 302 redirect whose `Location` header redirects the client to the -download endpoint (above) for the latest version. - -| Method | Path | Produces | -| ------ | ---------------------------- | -------------------------- | -| `GET` | `/:namespace/:name/:provider/download` | `application/json` | - -### Parameters - -- `namespace` `(string: )` - The user the module is owned by. - This is required and is specified as part of the URL path. - -- `name` `(string: )` - The name of the module. - This is required and is specified as part of the URL path. - -- `provider` `(string: )` - The name of the provider. - This is required and is specified as part of the URL path. - -### Sample Request - -```text -$ curl -i \ - https://registry.terraform.io/v1/modules/hashicorp/consul/aws/download -``` - -### Sample Response - -```text -HTTP/1.1 302 Found -Location: /v1/modules/hashicorp/consul/aws/0.0.1/download -Content-Length: 70 -Content-Type: text/html; charset=utf-8 - -Found. -``` - -## HTTP Status Codes - -The API follows regular HTTP status semantics. To make implementing a complete -client easier, some details on our policy and potential future status codes are -listed below. A robust client should consider how to handle all of the -following. 
- - - **Success:** Return status is `200` on success with a body or `204` if there - is no body data to return. - - **Redirects:** Moved or aliased endpoints redirect with a `301`. Endpoints - redirecting to the latest version of a module may redirect with `302` or - `307` to indicate that they logically point to different resources over time. - - **Client Errors:** Invalid requests will receive the relevant `4xx` status. - Except where noted below, the request should not be retried. - - **Rate Limiting:** Clients placing excessive load on the service might be - rate-limited and receive a `429` code. This should be interpreted as a sign - to slow down, and wait some time before retrying the request. - - **Service Errors:** The usual `5xx` errors will be returned for service - failures. In all cases it is safe to retry the request after receiving a - `5xx` response. - - **Load Shedding:** A `503` response indicates that the service is under load - and can't process your request immediately. As with other `5xx` errors you - may retry after some delay, although clients should consider being more - lenient with retry schedule in this case. - -## Error Responses - -When a `4xx` or `5xx` status code is returned. The response payload will look -like the following example: - -```json -{ - "errors": [ - "something bad happened" - ] -} -``` - -The `errors` key is a list containing one or more strings where each string -describes an error that has occurred. - -Note that it is possible that some `5xx` errors might result in a response that -is not in JSON format above due to being returned by an intermediate proxy. - -## Pagination - -Endpoints that return lists of results use a common pagination format. - -They accept positive integer query variables `offset` and `limit` which have the -usual SQL-like semantics. Each endpoint will have a default limit and a -default offset of `0`. 
Each endpoint will also apply a maximum limit, -requesting more results will just result in the maximum limit being used. - -The response for a paginated result set will look like: - -```json -{ - "meta": { - "limit": 15, - "current_offset": 15, - "next_offset": 30, - "prev_offset": 0, - }, - "": [] -} -``` -Note that: - - `next_offset` will only be present if there are more results available. - - `prev_offset` will only be present if not at `offset = 0`. - - `limit` is the actual limit that was applied, it may be lower than the requested limit param. - - The key for the result array varies based on the endpoint and will be the - type of result pluralized, for example `modules`. diff --git a/website/docs/registry/images/registry-issue.png b/website/docs/registry/images/registry-issue.png deleted file mode 100644 index 6d7c5fff6..000000000 Binary files a/website/docs/registry/images/registry-issue.png and /dev/null differ diff --git a/website/docs/registry/images/registry1.png b/website/docs/registry/images/registry1.png deleted file mode 100644 index 3bae1463c..000000000 Binary files a/website/docs/registry/images/registry1.png and /dev/null differ diff --git a/website/docs/registry/images/registry2.png b/website/docs/registry/images/registry2.png deleted file mode 100644 index e1e56f034..000000000 Binary files a/website/docs/registry/images/registry2.png and /dev/null differ diff --git a/website/docs/registry/images/user-account.png b/website/docs/registry/images/user-account.png deleted file mode 100644 index 06d4cfabe..000000000 Binary files a/website/docs/registry/images/user-account.png and /dev/null differ diff --git a/website/docs/registry/index.html.md b/website/docs/registry/index.html.md deleted file mode 100644 index 7e4a608df..000000000 --- a/website/docs/registry/index.html.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -layout: "registry" -page_title: "Terraform Registry" -sidebar_current: "docs-registry-home" -description: |- - The Terraform Registry is a 
repository of providers and modules written by the Terraform community. ---- - -# Terraform Registry - -The [Terraform Registry](https://registry.terraform.io) is an interactive resource for discovering a wide selection of integrations (providers) and configuration packages (modules) for use with Terraform. The Registry includes solutions developed by HashiCorp, third-party vendors, and our Terraform community. Our goal with the Registry is to provide plugins to manage any infrastructure API, pre-made modules to quickly configure common infrastructure components, and examples of how to write quality Terraform code. - -![screenshot: terraform registry landing page](./images/registry1.png) - -The Terraform Registry is integrated [directly into Terraform](/docs/configuration/provider-requirements.html) so you can directly specify providers and modules. Anyone can publish and consume providers and modules on the public [Terraform Registry](https://registry.terraform.io). (To publish private modules within your organization, you can use a [private registry](/docs/registry/private.html) or [reference repositories and other sources directly](/docs/modules/sources.html).) - -Use the navigation to the left to learn more about using the Terraform Registry. - -## Navigating the Registry - -The registry has a number of different categories for both modules and providers to help with navigating the large number of available options. Select a provider or module card to learn more, filter results to a [specific tier](./providers/index.html#provider-tiers-amp-namespaces), or use the search field at the top of the Registry to find what you’re looking for. (Note that search supports keyboard navigation.) - -![screenshot: terraform registry browse](./images/registry2.png) - -## User Account - -Anyone interested in publishing a provider or module can create an account and sign in to the Terraform Registry using a GitHub account. 
Click the "Sign-in" button, and follow the login prompts. Once you have authorized the use of your GitHub account and are signed in, you can publish both providers and modules directly from one of the repositories you manage. To learn more, see [Publishing to the Registry](/docs/registry/providers/publishing.html). - -![screenshot: terraform registry sign in](./images/user-account.png) - -## Getting Help - -We welcome any feedback about using or publishing to the Registry. Please reach out if you have any questions or issues with the Terraform Registry by sending us an [email](mailto:terraform-registry-beta@hashicorp.com). The providers and modules in The Terraform Registry are published and maintained either directly by HashiCorp, by trusted HashiCorp partners, or by members of the Terraform community ([see tiers & namespaces](./providers/index.html#provider-tiers-amp-namespaces)). If you run into issues or have additional contributions to make to a provider or module, you can submit a GitHub issue by selecting the "Report an issue" link on the detail view: - -![Provider report issue link](./images/registry-issue.png) diff --git a/website/docs/registry/modules/images/registry-verified.png b/website/docs/registry/modules/images/registry-verified.png deleted file mode 100644 index 806eb71ac..000000000 Binary files a/website/docs/registry/modules/images/registry-verified.png and /dev/null differ diff --git a/website/docs/registry/modules/publish.html.md b/website/docs/registry/modules/publish.html.md deleted file mode 100644 index 04628b682..000000000 --- a/website/docs/registry/modules/publish.html.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -layout: "registry" -page_title: "Terraform Registry - Publishing Modules" -sidebar_current: "docs-registry-publish" -description: |- - Anyone can publish and share modules on the Terraform Registry. ---- - -# Publishing Modules - -Anyone can publish and share modules on the [Terraform Registry](https://registry.terraform.io). 
- -Published modules support versioning, automatically generate documentation, -allow browsing version histories, show examples and READMEs, and more. We -recommend publishing reusable modules to a registry. - -Public modules are managed via Git and GitHub. Publishing a module takes only -a few minutes. Once a module is published, you can release a new version of -a module by simply pushing a properly formed Git tag. - -The registry extracts information about the module from the module's source. -The module name, provider, documentation, inputs/outputs, and dependencies are -all parsed and available via the UI or API, as well as the same information for -any submodules or examples in the module's source repository. - -## Requirements - -The list below contains all the requirements for publishing a module: - -- **GitHub.** The module must be on GitHub and must be a public repo. -This is only a requirement for the [public registry](https://registry.terraform.io). -If you're using a private registry, you may ignore this requirement. - -- **Named `terraform--`.** Module repositories must use this -three-part name format, where `` reflects the type of infrastructure the -module manages and `` is the main provider where it creates that -infrastructure. The `` segment can contain additional hyphens. Examples: -`terraform-google-vault` or `terraform-aws-ec2-instance`. - -- **Repository description.** The GitHub repository description is used -to populate the short description of the module. This should be a simple -one sentence description of the module. - -- **Standard module structure.** The module must adhere to the -[standard module structure](/docs/modules/index.html#standard-module-structure). -This allows the registry to inspect your module and generate documentation, -track resource usage, parse submodules and examples, and more. - -- **`x.y.z` tags for releases.** The registry uses tags to identify module -versions. 
Release tag names must be a [semantic version](http://semver.org), -which can optionally be prefixed with a `v`. For example, `v1.0.4` and `0.9.2`. -To publish a module initially, at least one release tag must be present. Tags -that don't look like version numbers are ignored. - -## Publishing a Public Module - -With the requirements met, you can publish a public module by going to -the [Terraform Registry](https://registry.terraform.io) and clicking the -"Upload" link in the top navigation. - -If you're not signed in, this will ask you to connect with GitHub. We only -ask for access to public repositories, since the public registry may only -publish public modules. We require access to hooks so we can register a webhook -with your repository. We require access to your email address so that we can -email you alerts about your module. We will not spam you. - -The upload page will list your available repositories, filtered to those that -match the [naming convention described above](#Requirements). This is shown in -the screenshot below. Select the repository of the module you want to add and -click "Publish Module." - -In a few seconds, your module will be created. - -![Publish Module flow animation](/assets/images/docs/registry-publish.gif) - -## Releasing New Versions - -The Terraform Registry uses tags to detect releases. - -Tag names must be a valid [semantic version](http://semver.org), optionally -prefixed with a `v`. Example of valid tags are: `v1.0.1` and `0.9.4`. To publish -a new module, you must already have at least one tag created. - -To release a new version, create and push a new tag with the proper format. -The webhook will notify the registry of the new version and it will appear -on the registry usually in less than a minute. - -If your version doesn't appear properly, you may force a sync with GitHub -by viewing your module on the registry and clicking "Resync Module" -under the "Manage Module" dropdown. This process may take a few minutes. 
-Please only do this if you do not see the version appear, since it will -cause the registry to resync _all versions_ of your module. diff --git a/website/docs/registry/modules/use.html.md b/website/docs/registry/modules/use.html.md deleted file mode 100644 index 2a3b2be91..000000000 --- a/website/docs/registry/modules/use.html.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -layout: "registry" -page_title: "Finding and Using Modules from the Terraform Registry" -sidebar_current: "docs-registry-use" -description: |- - The Terraform Registry makes it simple to find and use modules. ---- - -# Finding and Using Modules - -The [Terraform Registry](https://registry.terraform.io) makes it simple to -find and use modules. - -## Finding Modules - -Every page on the registry has a search field for finding -modules. Enter any type of module you're looking for (examples: "vault", -"vpc", "database") and resulting modules will be listed. The search query -will look at module name, provider, and description to match your search -terms. On the results page, filters can be used further refine search results. - -By default, only [verified modules](/docs/registry/modules/verified.html) -are shown in search results. Verified modules are reviewed by HashiCorp to -ensure stability and compatibility. By using the filters, you can view unverified -modules as well. - -## Using Modules - -The Terraform Registry is integrated directly into Terraform, so a Terraform -configuration can refer to any module published in the registry. The syntax for -specifying a registry module is `//`. For example: -`hashicorp/consul/aws`. - -~> **Note:** Module registry integration was added in Terraform v0.10.6, and full versioning support in v0.11.0. - -When viewing a module on the registry on a tablet or desktop, usage instructions -are shown on the right side. -You can copy and paste this to get started with any module. Some modules -have required inputs you must set before being able to use the module. 
- -```hcl -module "consul" { - source = "hashicorp/consul/aws" - version = "0.1.0" -} -``` - -The `terraform init` command will download and cache any modules referenced by -a configuration. - -### Private Registry Module Sources - -You can also use modules from a private registry, like the one provided by -Terraform Cloud. Private registry modules have source strings of the form -`///`. This is the same format as the -public registry, but with an added hostname prefix. - -```hcl -module "vpc" { - source = "app.terraform.io/example_corp/vpc/aws" - version = "0.9.3" -} -``` - -Depending on the registry you're using, you might also need to configure -credentials to access modules. See your registry's documentation for details. -[Terraform Cloud's private registry is documented here.](/docs/cloud/registry/index.html) - -Private registry module sources are supported in Terraform v0.11.0 and -newer. - -## Module Versions - -Each module in the registry is versioned. These versions syntactically must -follow [semantic versioning](http://semver.org/). In addition to pure syntax, -we encourage all modules to follow the full guidelines of semantic versioning. - -Terraform since version 0.11 will resolve any provided -[module version constraints](/docs/configuration/modules.html#module-versions) and -using them is highly recommended to avoid pulling in breaking changes. - -Terraform versions after 0.10.6 but before 0.11 have partial support for the registry -protocol, but always download the latest version instead of honoring version -constraints. 
diff --git a/website/docs/registry/modules/verified.html.md b/website/docs/registry/modules/verified.html.md deleted file mode 100644 index 357770f98..000000000 --- a/website/docs/registry/modules/verified.html.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -layout: "registry" -page_title: "Terraform Registry - Verified Modules" -sidebar_current: "docs-registry-verified" -description: |- - Verified modules are reviewed by HashiCorp and actively maintained by contributors to stay up-to-date and compatible with both Terraform and their respective providers. ---- - -# Verified Modules - -Verified modules are reviewed by HashiCorp and actively maintained by contributors to stay up-to-date and compatible with both Terraform and their respective providers. - -The verified badge appears next to modules that are published by a verified source. - -![Verified module listing](./images/registry-verified.png) - -Verified modules are expected to be actively maintained by HashiCorp partners. -The verified badge isn’t indicative of flexibility or feature support; very -simple modules can be verified just because they're great examples of modules. -Likewise, an unverified module could be extremely high quality and actively -maintained. An unverified module shouldn't be assumed to be poor quality, it -only means it hasn't been created by a HashiCorp partner. - -When [using registry modules](/docs/registry/modules/use.html), there is no -difference between a verified and unverified module; they are used the same -way. diff --git a/website/docs/registry/private.html.md b/website/docs/registry/private.html.md deleted file mode 100644 index 80a8b7502..000000000 --- a/website/docs/registry/private.html.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -layout: "registry" -page_title: "Terraform Registry - Private Registry" -sidebar_current: "docs-registry-private" -description: |- - Terraform can load private modules from private registries via Terraform Cloud. 
---- - -# Private Registries - -The registry at [registry.terraform.io](https://registry.terraform.io) -only hosts public modules, but most organizations have some modules that -can't, shouldn't, or don't need to be public. - -You can load private modules [directly from version control and other -sources](/docs/modules/sources.html), but those sources don't support [version -constraints](/docs/configuration/modules.html#module-versions) or a browsable -marketplace of modules, both of which are important for enabling a -producers-and-consumers content model in a large organization. - -If your organization is specialized enough that teams frequently use modules -created by other teams, you will benefit from a private module registry. - -## Terraform Cloud's Private Registry - -[Terraform Cloud](https://www.hashicorp.com/products/terraform) -includes a private module registry. It is available to all accounts, including free organizations. - -It uses the same VCS-backed tagged release workflow as the Terraform Registry, -but imports modules from your private VCS repos (on any of Terraform Cloud's supported VCS -providers) instead of requiring public GitHub repos. You can seamlessly -reference private modules in your Terraform configurations (just include a -hostname in the module source), and Terraform Cloud's UI provides a searchable marketplace -of private modules to help your users find the code they need. - -[Terraform Cloud's private module registry is documented here.](/docs/cloud/registry/index.html) - -## Other Private Registries - -Terraform can use versioned modules from any service that implements -[the registry API](/docs/registry/api.html). -The Terraform open source project does not provide a server implementation, but -we welcome community members to create their own private registries by following -the published protocol. 
- diff --git a/website/docs/registry/providers/docs.html.md b/website/docs/registry/providers/docs.html.md deleted file mode 100644 index cdbad83f0..000000000 --- a/website/docs/registry/providers/docs.html.md +++ /dev/null @@ -1,252 +0,0 @@ ---- -layout: "registry" -page_title: "Terraform Registry - Provider Documentation" -sidebar_current: "docs-registry-provider-docs" -description: |- - Expected document structure for publishing providers to the Terraform Registry. ---- - -# Provider Documentation - -The [Terraform Registry][terraform-registry] displays documentation for the providers it hosts. This page describes the expected format for provider documentation. - --> In order to test how documents will render in the Terraform Registry, you can use the [Terraform Registry Doc Preview Tool](https://registry.terraform.io/tools/doc-preview). - -## Publishing - -The Terraform Registry publishes providers from their Git repositories, creating a version for each Git tag that matches the [Semver](https://semver.org/) versioning format. Provider documentation is published automatically as part of the provider release process. - -Provider documentation is always tied to a provider version. A given version always displays the documentation from that version's Git commit, and the only way to publish updated documentation is to release a new version of the provider. - -### Storage Limits - -The maximum number of documents allowed for a single provider version is 1000. - -Each document can contain no more than 500KB of data. Documents which exceed this limit will be truncated, and a note will be displayed in the Terraform Registry. - -## Format - -Provider documentation should be a directory of Markdown documents in the provider repository. Each Markdown document is rendered as a separate page. The directory should include a document for the provider index, a document for each resource and data source, and optional documents for any guides. 
- -### Directory Structure - -| Location | Filename | Description | -|-|-|-| -| `docs/` | `index.md` | Index page for the provider. | -| `docs/guides/` | `.md` | Additional documentation for guides. | -| `docs/resources/` | `.md` | Information for a Resource. Filename should not include a `_` prefix. | -| `docs/data-sources/` | `.md` | Information on a provider data source. | - --> **Note:** In order to support provider docs which have already been formatted for publishing to [terraform.io][terraform-io-providers], the Terraform Registry also supports docs in a `website/docs/` legacy directory with file extensions of `.html.markdown` or `.html.md`. - -### Headers - -We strongly suggest that provider docs include the following sections to help users understand how to use the provider. Create additional sections if they would enhance usability of the resource (for example, “Imports” or “Customizable Timeouts”). - -#### Index Headers - - # Provider - - Summary of what the provider is for, including use cases and links to - app/service documentation. - - ## Example Usage - - ```hcl - // Code block with an example of how to use this provider. - ``` - - ## Argument Reference - - * List any arguments for the provider block. - -#### Resource/Data Source Headers - - # Resource/Data Source - - Description of what this resource does, with links to official - app/service documentation. - - ## Example Usage - - ```hcl - // Code block with an example of how to use this resource. - ``` - - ## Argument Reference - - * `attribute_name` - (Optional/Required) List arguments this resource takes. - - ## Attribute Reference - - * `attribute_name` - List attributes that this resource exports. - -### YAML Frontmatter - -Markdown source files may contain YAML frontmatter, which provides organizational information and display hints. Frontmatter can be omitted for resources and data sources that don't require a subcategory. - -Frontmatter is not rendered in the Terraform Registry web UI. 
- -#### Example - -```markdown ---- -page_title: "Authenticating with Foo Service via OAuth" -subcategory: "Authentication" ---- -``` - -#### Supported Attributes - -The following frontmatter attributes are supported by the Terraform Registry: - -* **page_title** - The title of this document, which will display in the docs navigation. This is only required for documents in the `guides/` folder. -* **subcategory** - An optional additional layer of grouping that affects the display of the docs navigation; [see Subcategories below](#subcategories) for more details. Resources and data sources should be organized into subcategories if the number of resources would be difficult to quickly scan for a user. Guides should be separated into subcategories if there are multiple guides which fit into 2 or more distinct groupings. - -### Callouts - -If you start a paragraph with a special arrow-like sigil, it will become a colored callout box. You can't make multi-paragraph callouts. For colorblind users (and for clarity in general), callouts will automatically start with a strong-emphasized word to indicate their function. - -Sigil | Text prefix | Color -------|-------------------|------- -`->` | `**Note**` | blue -`~>` | `**Note**` | yellow -`!>` | `**Warning**` | red - -## Navigation Hierarchy - -Provider docs are organized by category: resources, data sources, and guides. At a minimum, a provider must contain an index (`docs/index.md`) and at least one resource or data source. 
- -### Typical Structure - -A provider named `example` with a resource and data source for `instance` would have these 3 files: - -``` -docs/ - index.md - data-sources/ - instance.md - resources/ - instance.md -``` - -After publishing this provider version, its page on the Terraform Registry would display a navigation which resembles this hierarchy: - -* example Provider -* Resources - * example_instance -* Data Sources - * example_instance - -### Subcategories - -To group these resources by a service or other dimension, add the optional `subcategory` field to the YAML frontmatter of the resource and data source: - -```markdown ---- -subcategory: "Compute" ---- -``` - -This would change the navigation hierarchy to the following: - -* example Provider -* Compute - * Resources - * example_instance - * Data Sources - * example_instance - -Resources and data sources without a subcategory will be rendered before any subcategories. - -The following subcategories will be rendered at the bottom of the list: - -* Beta -* Deprecated - -### Guides - -Providers can optionally include 1 or more guides. These can assist users in using the provider for certain scenarios. - -``` -docs/ - index.md - guides/ - authenticating.md - data-sources/ - instance.md - resources/ - instance.md -``` - -The title for guides is controlled with the `page_title` attribute in the YAML frontmatter: - -```markdown ---- -page_title: "Authenticating with Example Cloud" ---- -``` - -The `page_title` is used (instead of the filename) for rendering the link to this guide in the navigation: - -* example Provider -* Guides - * Authenticating with Example Cloud -* Resources - * example_instance -* Data Sources - * example_instance - -Guides are always rendered before resources, data sources, and any subcategories. - -If a `page_title` attribute is not found, the title will default to the filename without the extension. 
- -### Guides Subcategories - -If a provider has many guides, you can use subcategories to group them into separate top-level sections. For example, given the following directory structure: - -``` -docs/ - index.md - guides/ - authenticating-basic.md - authenticating-oauth.md - setup.md - data-sources/ - instance.md - resources/ - instance.md -``` - -Assuming that these three guides have titles similar to their filenames, and the first two include `subcategory: "Authentication"` in their frontmatter, the Terraform Registry would display this navigation structure: - -* example Provider -* Guides - * Initial Setup -* Authentication - * Authenticating with Basic Authentication - * Authenticating with OAuth -* Resources - * example_instance -* Data Sources - * example_instance - -Guides without a subcategory are always rendered before guides with subcategories. Both are always rendered before resources and data sources. - -## Migrating Legacy Providers Docs - -For most provider docs already published to [terraform.io][terraform-io-providers], no changes are required to publish them to the Terraform Registry. - -~> **Important:** The only exceptions are providers which organize resources, data sources, or guides into subcategories. See the [Subcategories](#subcategories) section above for more information. - -If you want to publish docs on the Terraform Registry that are not currently published to terraform.io, take the following steps to migrate to the newer format: - -1. Move the `website/docs/` folder to `docs/` -2. Expand the folder names to match the Terraform Registry's expected format: - * Rename `docs/d/` to `docs/data-sources/` - * Rename `docs/r/` to `docs/resources/` -3. Change file suffixes from `.html.markdown` or `.html.md` to `.md`. 
- -[terraform-registry]: https://registry.terraform.io -[terraform-registry-providers]: https://registry.terraform.io/browse/providers -[terraform-io-providers]: https://www.terraform.io/docs/providers/ diff --git a/website/docs/registry/providers/images/archived-tier.png b/website/docs/registry/providers/images/archived-tier.png deleted file mode 100644 index 2849da327..000000000 Binary files a/website/docs/registry/providers/images/archived-tier.png and /dev/null differ diff --git a/website/docs/registry/providers/images/community-tier.png b/website/docs/registry/providers/images/community-tier.png deleted file mode 100644 index fe5d10006..000000000 Binary files a/website/docs/registry/providers/images/community-tier.png and /dev/null differ diff --git a/website/docs/registry/providers/images/github-oauth-permissions.png b/website/docs/registry/providers/images/github-oauth-permissions.png deleted file mode 100644 index 279ac4cf4..000000000 Binary files a/website/docs/registry/providers/images/github-oauth-permissions.png and /dev/null differ diff --git a/website/docs/registry/providers/images/official-tier.png b/website/docs/registry/providers/images/official-tier.png deleted file mode 100644 index 0110f8f4f..000000000 Binary files a/website/docs/registry/providers/images/official-tier.png and /dev/null differ diff --git a/website/docs/registry/providers/images/publishing.png b/website/docs/registry/providers/images/publishing.png deleted file mode 100644 index df1786b66..000000000 Binary files a/website/docs/registry/providers/images/publishing.png and /dev/null differ diff --git a/website/docs/registry/providers/images/verified-tier.png b/website/docs/registry/providers/images/verified-tier.png deleted file mode 100644 index 201e243f5..000000000 Binary files a/website/docs/registry/providers/images/verified-tier.png and /dev/null differ diff --git a/website/docs/registry/providers/index.html.md b/website/docs/registry/providers/index.html.md deleted file mode 
100644 index 411f44f23..000000000 --- a/website/docs/registry/providers/index.html.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -layout: "registry" -page_title: "Terraform Registry - Providers Overview" -description: |- - Overview of providers in the Terraform Registry ---- - -# Overview - -Providers are how Terraform integrates with any upstream API. - -The Terraform Registry is the main source for publicly available Terraform providers. It offers a browsable and searchable interface for finding providers, and makes it possible for Terraform CLI to automatically install any of the providers it hosts. - -If you want Terraform to support a new infrastructure service, you can create your own provider using Terraform's Go SDK. Once you've developed a provider, you can use the Registry to share it with the rest of the community. - -## Using Providers From the Registry - -The Registry is directly integrated with Terraform. To use any provider from the Registry, all you need to do is require it within your Terraform configuration; Terraform can then automatically install that provider when initializing a working directory, and your configuration can take advantage of any resources implemented by that provider. - -For more information, see: - -- [Configuration Language: Provider Requirements](/docs/configuration/provider-requirements.html) - -## Provider Tiers & Namespaces - -Terraform providers are published and maintained by a variety of sources, including HashiCorp, HashiCorp Technology Partners, and the Terraform community. The Registry uses tiers and badges to denote the source of a provider. Additionally, namespaces are used to help users identify the organization or publisher responsible for the integration, as shown in the table below. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
TierDescriptionNamespace
Official providers are owned and maintained by HashiCorp hashicorp
Verified providers are owned and maintained by third-party technology partners. Providers in this tier indicate HashiCorp has verified the authenticity of the Provider’s publisher, and that the partner is a member of the HashiCorp Technology Partner Program.Third-party organization, e.g. mongodb/mongodbatlas
Community providers are published to the Terraform Registry by individual maintainers, groups of maintainers, or other members of the Terraform community.
Maintainer’s individual or organization account, e.g. DeviaVir/gsuite
Archived Providers are Official or Verified Providers that are no longer maintained by HashiCorp or the community. This may occur if an API is deprecated or interest was low.hashicorp or third-party
-

- -## Verified Provider Development Program - -If your organization is interested in joining our Provider Development Program (which sets the standards for publishing providers and modules with a `Verified` badge), please take a look at our [Program Details](/guides/terraform-provider-development-program.html) for further information. diff --git a/website/docs/registry/providers/os-arch.html.md b/website/docs/registry/providers/os-arch.html.md deleted file mode 100644 index 930e9902b..000000000 --- a/website/docs/registry/providers/os-arch.html.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -layout: "registry" -page_title: "Recommended Provider Binary Operating Systems and Architectures - Terraform Registry" -sidebar_current: "docs-registry-provider-os-arch" -description: |- - Recommended Provider Binary Operating Systems and Architectures ---- - -# Recommended Provider Binary Operating Systems and Architectures - -We recommend the following operating system / architecture combinations for compiled binaries available in the registry (this list is already satisfied by our [recommended **.goreleaser.yml** configuration file](https://github.com/hashicorp/terraform-provider-scaffolding/blob/master/.goreleaser.yml)): - -* Darwin / AMD64 -* Linux / AMD64 (this is **required** for usage in Terraform Cloud, see below) -* Linux / ARMv8 (sometimes referred to as AArch64 or ARM64) -* Linux / ARMv6 -* Windows / AMD64 - -We also recommend shipping binaries for the following combinations, but we typically do not prioritize fixes for these: - -* Linux / 386 -* Windows / 386 -* FreeBSD / 386 -* FreeBSD / AMD64 - -## Terraform Cloud Compatibility - -To ensure your provider can run in Terraform Cloud, please include a Linux / AMD64 binary. This binary should also not have CGO enabled and should not depend on command line execution of any external tools or binaries. We cannot guaruntee availibility of any package/library/binary within the Terraform Cloud images. 
diff --git a/website/docs/registry/providers/publishing.html.md b/website/docs/registry/providers/publishing.html.md deleted file mode 100644 index fa6b96ff6..000000000 --- a/website/docs/registry/providers/publishing.html.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -layout: "registry" -page_title: "Terraform Registry - Publishing Providers" -sidebar_current: "docs-registry-provider-publishing" -description: |- - Publishing Providers to the Terraform Registry ---- - -# Publishing Providers - -Anyone can publish and share a provider by signing into the Registry using their GitHub account and following a few additional steps. - -This page describes how to prepare a [Terraform Provider](/docs/plugins/provider.html) for publishing, and how to publish a prepared provider using the Registry's interface. - -## Preparing your Provider - -### Writing a Provider - -Providers published to the Terraform Registry are written and built in the same way as other Terraform providers. A variety of resources are available to help our contributors build a quality integration: - -- The [Call APIs with Terraform Providers](https://learn.hashicorp.com/collections/terraform/providers?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn -- [How to build a provider – Video](https://www.youtube.com/watch?v=2BvpqmFpchI) -- [Sample provider developed by a HashiCorp partner](https://blog.container-solutions.com/write-terraform-provider-part-1) -- Example providers for reference: - - [AWS](https://github.com/terraform-providers/terraform-provider-aws) - - [AzureRM](https://github.com/terraform-providers/terraform-provider-azurerm) -- [Contributing to Terraform guidelines](/docs/extend/community/contributing.html) - -~> **Important:** In order to be detected by the Terraform Registry, all provider repositories on GitHub must match the pattern `terraform-provider-{NAME}`, and the repository must be public. 
- -### Documenting your Provider - -Your provider should contain an overview document (index.md), as well as a doc for each resource and data-source. See [Documenting Providers](./docs.html) for details about how to ensure your provider documentation renders properly on the Terraform Registry. - --> **Note:** In order to test how documents will render in the Terraform Registry, you can use the [Terraform Registry Doc Preview Tool](https://registry.terraform.io/tools/doc-preview). - -### Creating a GitHub Release - -Publishing a provider requires at least one version be available on GitHub Releases. The tag must be a valid [Semantic Version](https://semver.org/) preceded with a `v` (for example, `v1.2.3`). - -Terraform CLI and the Terraform Registry follow the Semantic Versioning specification when detecting a valid version, sorting versions, solving version constraints, and choosing the latest version. Prerelease versions are supported (available if explicitly defined but not chosen automatically) with a hyphen (-) delimiter, such as `v1.2.3-pre`. - -We have a list of [recommend OS / architecture combinations](/docs/registry/providers/os-arch.html) for which we suggest most providers create binaries. - -~> **Important:** Avoid modifying or replacing an already-released version of a provider, as this will cause checksum errors for users when attempting to download the plugin. Instead, if changes are necessary, please release as a new version. - -#### GitHub Actions (Preferred) - -[GitHub Actions](https://docs.github.com/en/actions) allow you to execute workflows when events on your repository occur. You can use this to publish provider releases to the Terraform Registry whenever a new version tag is created on your repository. - -To use GitHub Actions to publish new provider releases to the Terraform Registry: - -1. Create and export a signing key that you plan on using to sign your provider releases. 
See [Preparing and Adding a Signing Key](#preparing-and-adding-a-signing-key) for more information. -1. Copy the [GoReleaser configuration from the terraform-provider-scaffolding repository](https://github.com/hashicorp/terraform-provider-scaffolding/blob/master/.goreleaser.yml) to the root of your repository. -1. Copy the [GitHub Actions workflow from the terraform-provider-scaffolding repository](https://github.com/hashicorp/terraform-provider-scaffolding/blob/master/.github/workflows/release.yml) to `.github/workflows/release.yml` in your repository. -1. Go to *Settings > Secrets* in your repository, and add the following secrets: - * `GPG_PRIVATE_KEY` - Your ASCII-armored GPG private key. You can export this with `gpg --armor --export-secret-keys [key ID or email]`. - * `PASSPHRASE` - The passphrase for your GPG private key. -1. Push a new valid version tag (e.g. `v1.2.3`) to test that the GitHub Actions releaser is working. - -Once a release is created, you can move on to [Publishing to the Registry](#publishing-to-the-registry). - -#### Using GoReleaser locally - -GoReleaser is a tool for building Go projects for multiple platforms, creating a checksums file, and signing the release. It can also upload your release to GitHub Releases. - -1. Install [GoReleaser](https://goreleaser.com) using the [installation instructions](https://goreleaser.com/install/). -1. Copy the [.goreleaser.yml file](https://github.com/hashicorp/terraform-provider-scaffolding/blob/master/.goreleaser.yml) from the [hashicorp/terraform-provider-scaffolding](https://github.com/hashicorp/terraform-provider-scaffolding) repository. -1. Cache the password for your GPG private key with `gpg --armor --detach-sign` (see note below). -1. Set your `GITHUB_TOKEN` to a [Personal Access Token](https://github.com/settings/tokens/new?scopes=public_repo) that has the **public_repo** scope. -1. Tag your version with `git tag v1.2.3`. -1. 
Build, sign, and upload your release with `goreleaser release --rm-dist`. - --> GoReleaser does not support signing binaries with a GPG key that requires a passphrase. Some systems may cache your GPG passphrase for a few minutes. If you are unable to cache the passphrase for GoReleaser, please use the manual release preparation process below, or remove the signature step from GoReleaser and sign it prior to moving the GitHub release from draft to published. - -#### Manually Preparing a Release - -If for some reason you're not able to use GoReleaser to build, sign, and upload your release, you can create the required assets by following these steps, or encode them into a Makefile or shell script. - -The release must meet the following criteria: - -* There are 1 or more zip files containing the built provider binary for a single architecture - * The binary name is `terraform-provider-{NAME}_v{VERSION}` - * The archive name is `terraform-provider-{NAME}_{VERSION}_{OS}_{ARCH}.zip` -* There is a `terraform-provider-{NAME}_{VERSION}_SHA256SUMS` file, which contains a sha256 sum for each zip file in the release. - * `shasum -a 256 *.zip > terraform-provider-{NAME}_{VERSION}_SHA256SUMS` -* There is a `terraform-provider-{NAME}_{VERSION}_SHA256SUMS.sig` file, which is a valid GPG signature of the `terraform-provider-{NAME}_{VERSION}_SHA256SUMS` file using the keypair. - * `gpg --detach-sign terraform-provider-{NAME}_{VERSION}_SHA256SUMS` -* Release is finalized (not a private draft). - -## Publishing to the Registry - -### Signing in - -Before publishing a provider, you must first sign in to the Terraform Registry with a GitHub account (see [Signing into the Registry](/docs/registry/index.html#creating-an-account)). The GitHub account used must have the following permission scopes on the provider repository you’d like to publish. 
Permissions can be verified by going to your [GitHub Settings](https://github.com/settings/connections/applications/) and selecting the Terraform Registry Application under Authorized OAuth Apps. - -![screenshot: terraform registry github oauth required permissions](./images/github-oauth-permissions.png) - -### Preparing and Adding a Signing Key - -All provider releases are required to be signed, thus you must provide HashiCorp with the public key for the GPG keypair that you will be signing releases with. The Terraform Registry will validate that the release is signed with this key when publishing each version, and Terraform will verify this during `terraform init`. - -- Generate a GPG key to be used when signing releases (See [GitHub's detailed instructions](https://docs.github.com/en/github/authenticating-to-github/generating-a-new-gpg-key) for help with this step, but you do not need to add the key to GitHub) -- Export your public key in ASCII-armor format using the following command, substituting the GPG key ID created in the step above: - -```console -$ gpg --armor --export "{Key ID or email address}" -``` - -The ASCII-armored public key to the Terraform Registry by going to [User Settings > Signing Keys](https://registry.terraform.io/settings/gpg-keys). You can add keys for your personal namespace, or any organization which you are an admin of. - - -### Publishing Your Provider - -In the top-right navigation, select [Publish > Provider](https://registry.terraform.io/publish/provider) to begin the publishing process. Follow the prompts to select the organization and repository you would like to publish. - -#### Terms of Use - -Anything published to the Terraform Registry is subject to our terms of use. 
A copy of the terms are available for viewing at https://registry.terraform.io/terms - -### Support - -If you experience issues publishing your provider to the Terraform Registry, please contact us at [terraform-registry@hashicorp.com](mailto:terraform-registry@hashicorp.com). diff --git a/website/docs/state/environments.html.md b/website/docs/state/environments.html.md deleted file mode 100644 index dfc8f7618..000000000 --- a/website/docs/state/environments.html.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -layout: "docs" -page_title: "State: Environments" -sidebar_current: "docs-state-env" -description: |- - Legacy terminology for "Workspaces". ---- - -# State Environments - -The term _state environment_, or just _environment_, was used within the -Terraform 0.9 releases to refer to the idea of having multiple distinct, -named states associated with a single configuration directory. - -After this concept was implemented, we received feedback that this terminology -caused confusion due to other uses of the word "environment", both within -Terraform itself and within organizations using Terraform. - -As of 0.10, the preferred term is "workspace". For more information on -workspaces, see [the main Workspaces page](/docs/state/workspaces.html). diff --git a/website/guides/core-workflow.html.md b/website/guides/core-workflow.html.md index c7f50a6f8..e307728e4 100644 --- a/website/guides/core-workflow.html.md +++ b/website/guides/core-workflow.html.md @@ -1,5 +1,5 @@ --- -layout: "guides" +layout: "intro" page_title: "The Core Terraform Workflow - Guides" sidebar_current: "guides-core-workflow" description: |- @@ -235,7 +235,7 @@ for a better experience at each step. Terraform Cloud provides a centralized and secure location for storing input variables and state while also bringing back a tight feedback loop for speculative plans for config authors. Terraform configuration interacts with -Terraform Cloud via the ["remote" backend](/docs/backends/types/remote.html). 
+Terraform Cloud via the ["remote" backend](/docs/language/settings/backends/remote.html). ``` terraform { diff --git a/website/guides/index.html.md b/website/guides/index.html.md index 8eb707384..987d38531 100644 --- a/website/guides/index.html.md +++ b/website/guides/index.html.md @@ -1,5 +1,5 @@ --- -layout: "guides" +layout: "intro" page_title: "Guides" sidebar_current: "guides-home" description: |- diff --git a/website/guides/terraform-provider-development-program.html.md b/website/guides/terraform-provider-development-program.html.md index 9acba17a3..59d5ffc64 100644 --- a/website/guides/terraform-provider-development-program.html.md +++ b/website/guides/terraform-provider-development-program.html.md @@ -13,7 +13,7 @@ The Verified badge helps users easily identify and discover integrations develop ![Verified Provider Card](/assets/images/docs/verified-card.png) --> **Building your own provider?** If you're building your own provider and aren't interested in having HashiCorp officially verify and regularly monitor your provider, please refer to the [Writing Custom Providers guide](https://www.terraform.io/docs/extend/writing-custom-providers.html) and the [Extending Terraform](https://www.terraform.io/docs/extend/index.html) section. +-> **Building your own provider?** If you're building your own provider and aren't interested in having HashiCorp officially verify and regularly monitor your provider, please refer to the [Call APIs with Terraform Providers](https://learn.hashicorp.com/collections/terraform/providers?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn and the [Extending Terraform](https://www.terraform.io/docs/extend/index.html) section of the documentation. ## What is a Terraform Provider? 
@@ -66,7 +66,7 @@ The provider development process is divided into five steps below. By following ![Provider Development Process](/assets/images/docs/program-steps.png) 1. **Apply**: Initial contact between vendor and HashiCorp -2. **Prepare**: Follow documentation while developing the provider +2. **Prepare**: Follow documentation while developing the provider 3. **Verify**: Share public GPG key with HashiCorp 4. **Publish**: Release the provider on the Registry 5. **Support**: Ongoing maintenance and support of the provider by the vendor. @@ -112,7 +112,7 @@ We’ve found the provider development process to be fairly straightforward and ### 3. Verify -At this stage, it is expected that the provider is fully developed, all tests and documentation are in place, and your provider is ready for publishing. In this step, HashiCorp will verify the source and authenticity of the namespace being used to publish the provider by signing your GPG key with a trust signature. +At this stage, it is expected that the provider is fully developed, all tests and documentation are in place, and your provider is ready for publishing. In this step, HashiCorp will verify the source and authenticity of the namespace being used to publish the provider by signing your GPG key with a trust signature. -> **Important:** This step requires that you have signed and accepted our Technology Partner Agreement. If you have not received this, please see step #1 above. @@ -128,7 +128,7 @@ $ gpg --armor --export "{Key ID or email address}" Once the verification step is complete please follow the steps on [Publishing a Provider](https://www.terraform.io/docs/registry/providers/publishing.html). This step does not require additional involvement from HashiCorp as publishing is a fully self-service process in the [Terraform Registry](https://registry.terraform.io). -Once completed, your provider should be visible in the Terraform Registry and usable in Terraform. 
Please confirm that everything looks good, and that documentation is rendering properly. +Once completed, your provider should be visible in the Terraform Registry and usable in Terraform. Please confirm that everything looks good, and that documentation is rendering properly. ### 5. Maintain & Support diff --git a/website/intro/examples/aws.html.markdown b/website/intro/examples/aws.html.markdown deleted file mode 100644 index 0a9449e8f..000000000 --- a/website/intro/examples/aws.html.markdown +++ /dev/null @@ -1,28 +0,0 @@ ---- -layout: "intro" -page_title: "Two-Tier AWS Architecture" -sidebar_current: "examples-aws" -description: |- - This provides a template for running a simple two-tier architecture on Amazon Web services. The premise is that you have stateless app servers running behind an ELB serving traffic. ---- - -# Two-Tier AWS Architecture - -[**Example Source Code**](https://github.com/terraform-providers/terraform-provider-aws/tree/master/examples/two-tier) - -This provides a template for running a simple two-tier architecture on Amazon -Web Services. The premise is that you have stateless app servers running behind -an ELB serving traffic. - -To simplify the example, it intentionally ignores deploying and -getting your application onto the servers. However, you could do so either via -[provisioners](/docs/provisioners/index.html) and a configuration -management tool, or by pre-baking configured AMIs with -[Packer](https://www.packer.io). - -After you run `terraform apply` on this configuration, it will -automatically output the DNS address of the ELB. After your instance -registers, this should respond with the default Nginx web page. - -As with all the examples, just copy and paste the example and run -`terraform apply` to see it work. 
diff --git a/website/intro/examples/consul.html.markdown b/website/intro/examples/consul.html.markdown deleted file mode 100644 index 3f8223ffa..000000000 --- a/website/intro/examples/consul.html.markdown +++ /dev/null @@ -1,57 +0,0 @@ ---- -layout: "intro" -page_title: "Consul Example" -sidebar_current: "examples-consul" -description: |- - Consul is a tool for service discovery, configuration and orchestration. The Key/Value store it provides is often used to store application configuration and information about the infrastructure necessary to process requests. ---- - -# Consul Example - -[**Example Source Code**](https://github.com/terraform-providers/terraform-provider-consul/tree/master/examples/kv) - -[Consul](https://www.consul.io) is a tool for service discovery, configuration -and orchestration. The Key/Value store it provides is often used to store -application configuration and information about the infrastructure necessary -to process requests. - -Terraform provides a [Consul provider](/docs/providers/consul/index.html) which -can be used to interface with Consul from inside a Terraform configuration. - -For our example, we use the [Consul demo cluster](https://demo.consul.io/) -to both read configuration and store information about a newly created EC2 instance. -The size of the EC2 instance will be determined by the `tf_test/size` key in Consul, -and will default to `m1.small` if that key does not exist. Once the instance is created -the `tf_test/id` and `tf_test/public_dns` keys will be set with the computed -values for the instance. - -Before we run the example, use the [Web UI](https://demo.consul.io/ui/dc1/kv/) -to set the `tf_test/size` key to `t1.micro`. Once that is done, -copy the configuration into a configuration file (`consul.tf` works fine). -Either provide the AWS credentials as a default value in the configuration -or invoke `apply` with the appropriate variables set. 
- -Once the `apply` has completed, we can see the keys in Consul by -visiting the [Web UI](https://demo.consul.io/ui/dc1/kv/). We can see -that the `tf_test/id` and `tf_test/public_dns` values have been -set. - -You can now [tear down the infrastructure](https://learn.hashicorp.com/tutorials/terraform/aws-destroy?in=terraform/aws-get-started&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS). -Because we set the `delete` property of two of the Consul keys, Terraform -will clean up those keys on destroy. We can verify this by using -the Web UI. - -This example has shown that Consul can be used with Terraform both to read -existing values and to store generated results. - -Inputs like AMI name, security groups, Puppet roles, bootstrap scripts, -etc can all be loaded from Consul. This allows the specifics of an -infrastructure to be decoupled from its overall architecture. This enables -details to be changed without updating the Terraform configuration. - -Outputs from Terraform can also be easily stored in Consul. One powerful -feature this enables is using Consul for inventory management. If an -application relies on ELB for routing, Terraform can update the application's -configuration directly by setting the ELB address into Consul. Any resource -attribute can be stored in Consul, allowing an operator to capture anything -useful. diff --git a/website/intro/examples/count.markdown b/website/intro/examples/count.markdown deleted file mode 100644 index e60649246..000000000 --- a/website/intro/examples/count.markdown +++ /dev/null @@ -1,20 +0,0 @@ ---- -layout: "intro" -page_title: "Count" -sidebar_current: "examples-count" -description: |- - The count parameter on resources can simplify configurations and let you scale resources by simply incrementing a number. 
---- - -# Count Example - -[**Example Source Code**](https://github.com/terraform-providers/terraform-provider-aws/tree/master/examples/count) - -The `count` parameter on resources can simplify configurations -and let you scale resources by simply incrementing a number. - -Additionally, variables can be used to expand a list of resources -for use elsewhere. - -As with all the examples, just copy and paste the example and run -`terraform apply` to see it work. diff --git a/website/intro/examples/cross-provider.markdown b/website/intro/examples/cross-provider.markdown deleted file mode 100644 index 381d4c49a..000000000 --- a/website/intro/examples/cross-provider.markdown +++ /dev/null @@ -1,21 +0,0 @@ ---- -layout: "intro" -page_title: "Cross Provider" -sidebar_current: "examples-cross-provider" -description: |- - An example of the cross-provider capabilities of Terraform. ---- - -# Cross Provider Example - -[**Example Source Code**](https://github.com/hashicorp/terraform/tree/master/examples/cross-provider) - -This is a simple example of the cross-provider capabilities of -Terraform. - -This creates a Heroku application and points a DNS -CNAME record at the result via DNSimple. A `host` query to the outputted -hostname should reveal the correct DNS configuration. - -As with all the examples, just copy and paste the example and run -`terraform apply` to see it work. diff --git a/website/intro/examples/index.html.markdown b/website/intro/examples/index.html.markdown deleted file mode 100644 index 8a0318a94..000000000 --- a/website/intro/examples/index.html.markdown +++ /dev/null @@ -1,60 +0,0 @@ ---- -layout: "intro" -page_title: "Example Configurations" -sidebar_current: "examples" -description: |- - These examples are designed to help you understand some of the ways Terraform can be used. ---- - -# Example Configurations - -The examples in this section illustrate some -of the ways Terraform can be used. - -All examples are ready to run as-is. 
Terraform will -ask for input of things such as variables and API keys. If you want to -continue using the example, you should save those parameters in a -"terraform.tfvars" file or in a `provider` config block. - -~> **Warning!** The examples use real providers that launch _real_ resources. -That means they can cost money to experiment with. To avoid unexpected charges, -be sure to understand the price of resources before launching them, and verify -any unneeded resources are cleaned up afterwards. - -Experimenting in this way can help you learn how the Terraform lifecycle -works, as well as how to repeatedly create and destroy infrastructure. - -If you're completely new to Terraform, we recommend reading the -[Terraform: Get Started](https://learn.hashicorp.com/collections/terraform/aws-get-started?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) collection on HashiCorp Learn before diving into -the examples. However, due to the intuitive configuration Terraform -uses it isn't required. - -## Examples - -Our examples are distributed across several repos. [This README file in the Terraform repo has links to all of them.](https://github.com/hashicorp/terraform/tree/master/examples) - -To use these examples, Terraform must first be installed on your machine. -You can install Terraform from the [downloads page](/downloads.html). -Once installed, you can download, view, and run the examples. - -To use an example, clone the repository that contains it and navigate to its directory. For example, to try the AWS two-tier architecture example: - -``` -git clone https://github.com/terraform-providers/terraform-provider-aws.git -cd terraform-provider-aws/examples/two-tier -``` - -You can then use your preferred code editor to browse and read the configurations. -To try out an example, run Terraform's init and apply commands while in the example's directory: - -``` -$ terraform init -... -$ terraform apply -... 
-``` - -Terraform will interactively ask for variable input and potentially -provider configuration, and will start executing. - -When you're done with the example, run `terraform destroy` to clean up. diff --git a/website/intro/use-cases.html.markdown b/website/intro/use-cases.html.markdown index 0c545cf59..e2d3f26fd 100644 --- a/website/intro/use-cases.html.markdown +++ b/website/intro/use-cases.html.markdown @@ -91,7 +91,7 @@ This configuration can then be used by Terraform to automatically setup and modi settings by interfacing with the control layer. This allows configuration to be versioned and changes to be automated. As an example, [AWS VPC](https://aws.amazon.com/vpc/) is one of the most commonly used SDN implementations, and [can be configured by -Terraform](/docs/providers/aws/r/vpc.html). +Terraform](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc). ## Resource Schedulers diff --git a/website/layouts/backend-types.erb b/website/layouts/backend-types.erb deleted file mode 100644 index 14dacac61..000000000 --- a/website/layouts/backend-types.erb +++ /dev/null @@ -1,88 +0,0 @@ -<% wrap_layout :inner do %> - <% content_for :sidebar do %> -

Terraform CLI

- - - - <%= partial("layouts/otherdocs", :locals => { :skip => "Terraform CLI" }) %> - <% end %> - - <%= yield %> -<% end %> diff --git a/website/layouts/commands-providers.erb b/website/layouts/commands-providers.erb deleted file mode 100644 index a0587364a..000000000 --- a/website/layouts/commands-providers.erb +++ /dev/null @@ -1,35 +0,0 @@ -<% wrap_layout :inner do %> - <% content_for :sidebar do %> -

Terraform CLI

- - - - <%= partial("layouts/otherdocs", :locals => { :skip => "Terraform CLI" }) %> - - <% end %> - - <%= yield %> -<% end %> diff --git a/website/layouts/commands-state.erb b/website/layouts/commands-state.erb deleted file mode 100644 index f5a98d329..000000000 --- a/website/layouts/commands-state.erb +++ /dev/null @@ -1,57 +0,0 @@ -<% wrap_layout :inner do %> - <% content_for :sidebar do %> -

Terraform CLI

- - - - <%= partial("layouts/otherdocs", :locals => { :skip => "Terraform CLI" }) %> - - <% end %> - - <%= yield %> -<% end %> diff --git a/website/layouts/commands-workspace.erb b/website/layouts/commands-workspace.erb deleted file mode 100644 index 624026cbf..000000000 --- a/website/layouts/commands-workspace.erb +++ /dev/null @@ -1,45 +0,0 @@ -<% wrap_layout :inner do %> - <% content_for :sidebar do %> -

Terraform CLI

- - - - <%= partial("layouts/otherdocs", :locals => { :skip => "Terraform CLI" }) %> - - <% end %> - - <%= yield %> -<% end %> diff --git a/website/layouts/docs.erb b/website/layouts/docs.erb index 8d6d32e75..7ebf352d6 100644 --- a/website/layouts/docs.erb +++ b/website/layouts/docs.erb @@ -1,537 +1,568 @@ <% wrap_layout :inner do %> <% content_for :sidebar do %> -

Terraform CLI

+

Terraform CLI