Merge pull request #3223 from scalp42/typos

remove various typos
This commit is contained in:
Radek Simko 2015-09-14 07:48:59 +01:00
commit 57bea9f26c
52 changed files with 76 additions and 76 deletions

View File

@ -532,7 +532,7 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error {
})
if err != nil {
log.Printf("[DEBUG] Error retreiving tags for ARN: %s", arn)
log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
}
var dt []*rds.Tag
@ -716,7 +716,7 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
}
}
// seperate request to promote a database
// separate request to promote a database
if d.HasChange("replicate_source_db") {
if d.Get("replicate_source_db").(string) == "" {
// promote

View File

@ -253,7 +253,7 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{})
})
if err != nil {
log.Printf("[DEBUG] Error retreiving tags for ARN: %s", arn)
log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
}
var et []*elasticache.Tag

View File

@ -30,7 +30,7 @@ func resourceAwsIamUser() *schema.Resource {
name. The only way to locate a user by UniqueID is to list them
all and that would make this provider unnecessarily complex
and inefficient. Still, there are other reasons one might want
the UniqueID, so we can make it availible.
the UniqueID, so we can make it available.
*/
"unique_id": &schema.Schema{
Type: schema.TypeString,

View File

@ -295,7 +295,7 @@ func testIngressRuleLength(networkAcl *ec2.NetworkAcl, length int) resource.Test
}
}
// There is always a default rule (ALL Traffic ... DENY)
// so we have to increase the lenght by 1
// so we have to increase the length by 1
if len(ingressEntries) != length+1 {
return fmt.Errorf("Invalid number of ingress entries found; count = %d", len(ingressEntries))
}

View File

@ -96,7 +96,7 @@ func resourceAwsNetworkInterfaceCreate(d *schema.ResourceData, meta interface{})
private_ips := d.Get("private_ips").(*schema.Set).List()
if len(private_ips) != 0 {
request.PrivateIpAddresses = expandPrivateIPAddesses(private_ips)
request.PrivateIpAddresses = expandPrivateIPAddresses(private_ips)
}
log.Printf("[DEBUG] Creating network interface")
@ -133,7 +133,7 @@ func resourceAwsNetworkInterfaceRead(d *schema.ResourceData, meta interface{}) e
eni := describeResp.NetworkInterfaces[0]
d.Set("subnet_id", eni.SubnetId)
d.Set("private_ips", flattenNetworkInterfacesPrivateIPAddesses(eni.PrivateIpAddresses))
d.Set("private_ips", flattenNetworkInterfacesPrivateIPAddresses(eni.PrivateIpAddresses))
d.Set("security_groups", flattenGroupIdentifiers(eni.Groups))
d.Set("source_dest_check", eni.SourceDestCheck)

View File

@ -378,7 +378,7 @@ func expandStringList(configured []interface{}) []*string {
}
//Flattens an array of private ip addresses into a []string, where the elements returned are the IP strings e.g. "192.168.0.0"
func flattenNetworkInterfacesPrivateIPAddesses(dtos []*ec2.NetworkInterfacePrivateIpAddress) []string {
func flattenNetworkInterfacesPrivateIPAddresses(dtos []*ec2.NetworkInterfacePrivateIpAddress) []string {
ips := make([]string, 0, len(dtos))
for _, v := range dtos {
ip := *v.PrivateIpAddress
@ -398,7 +398,7 @@ func flattenGroupIdentifiers(dtos []*ec2.GroupIdentifier) []string {
}
//Expands an array of IPs into a ec2 Private IP Address Spec
func expandPrivateIPAddesses(ips []interface{}) []*ec2.PrivateIpAddressSpecification {
func expandPrivateIPAddresses(ips []interface{}) []*ec2.PrivateIpAddressSpecification {
dtos := make([]*ec2.PrivateIpAddressSpecification, 0, len(ips))
for i, v := range ips {
new_private_ip := &ec2.PrivateIpAddressSpecification{

View File

@ -189,7 +189,7 @@ func TestExpandIPPerms_NegOneProtocol(t *testing.T) {
}
// Now test the error case. This *should* error when either from_port
// or to_port is not zero, but protocal is "-1".
// or to_port is not zero, but protocol is "-1".
errorCase := []interface{}{
map[string]interface{}{
"protocol": "-1",
@ -497,13 +497,13 @@ func TestexpandInstanceString(t *testing.T) {
}
}
func TestflattenNetworkInterfacesPrivateIPAddesses(t *testing.T) {
func TestflattenNetworkInterfacesPrivateIPAddresses(t *testing.T) {
expanded := []*ec2.NetworkInterfacePrivateIpAddress{
&ec2.NetworkInterfacePrivateIpAddress{PrivateIpAddress: aws.String("192.168.0.1")},
&ec2.NetworkInterfacePrivateIpAddress{PrivateIpAddress: aws.String("192.168.0.2")},
}
result := flattenNetworkInterfacesPrivateIPAddesses(expanded)
result := flattenNetworkInterfacesPrivateIPAddresses(expanded)
if result == nil {
t.Fatal("result was nil")
@ -543,7 +543,7 @@ func TestflattenGroupIdentifiers(t *testing.T) {
}
}
func TestexpandPrivateIPAddesses(t *testing.T) {
func TestexpandPrivateIPAddresses(t *testing.T) {
ip1 := "192.168.0.1"
ip2 := "192.168.0.2"
@ -552,7 +552,7 @@ func TestexpandPrivateIPAddesses(t *testing.T) {
ip2,
}
result := expandPrivateIPAddesses(flattened)
result := expandPrivateIPAddresses(flattened)
if len(result) != 2 {
t.Fatalf("expected result had %d elements, but got %d", 2, len(result))

View File

@ -165,7 +165,7 @@ func resourceAzureDnsServerUpdate(d *schema.ResourceData, meta interface{}) erro
}
// resourceAzureDnsServerExists does all the necessary API calls to
// check if the DNS server definition alredy exists on Azure.
// check if the DNS server definition already exists on Azure.
func resourceAzureDnsServerExists(d *schema.ResourceData, meta interface{}) (bool, error) {
azureClient := meta.(*Client)
vnetClient := azureClient.vnetClient

View File

@ -52,7 +52,7 @@ func TestAccAzureInstance_separateHostedService(t *testing.T) {
CheckDestroy: testAccCheckAzureInstanceDestroyed(testAccHostedServiceName),
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAzureInstance_seperateHostedService,
Config: testAccAzureInstance_separateHostedService,
Check: resource.ComposeTestCheckFunc(
testAccCheckAzureInstanceExists(
"azure_instance.foo", testAccHostedServiceName, &dpmt),
@ -384,7 +384,7 @@ resource "azure_instance" "foo" {
}
}`, instanceName, testAccStorageServiceName)
var testAccAzureInstance_seperateHostedService = fmt.Sprintf(`
var testAccAzureInstance_separateHostedService = fmt.Sprintf(`
resource "azure_hosted_service" "foo" {
name = "%s"
location = "West US"

View File

@ -137,7 +137,7 @@ func resourceAzureSqlDatabaseServiceUpdate(d *schema.ResourceData, meta interfac
sqlClient := azureClient.sqlClient
serverName := d.Get("database_server_name").(string)
// changes to the name must occur seperately from changes to the attributes:
// changes to the name must occur separately from changes to the attributes:
if d.HasChange("name") {
oldv, newv := d.GetChange("name")
@ -188,7 +188,7 @@ func resourceAzureSqlDatabaseServiceUpdate(d *schema.ResourceData, meta interfac
log.Println("[INFO] Issuing Azure Database Service parameter update.")
reqID, err := sqlClient.UpdateDatabase(serverName, name, updateParams)
if err != nil {
return fmt.Errorf("Failed issuing Azure SQL Service paramater update: %s", err)
return fmt.Errorf("Failed issuing Azure SQL Service parameter update: %s", err)
}
log.Println("[INFO] Waiting for Azure SQL Database Service parameter update.")

View File

@ -125,7 +125,7 @@ func resourceAzureStorageBlobRead(d *schema.ResourceData, meta interface{}) erro
// resourceAzureStorageBlobUpdate does all the necessary API calls to
// update a blob on Azure.
func resourceAzureStorageBlobUpdate(d *schema.ResourceData, meta interface{}) error {
// NOTE: although empty as most paramters have ForceNew set; this is
// NOTE: although empty as most parameters have ForceNew set; this is
// still required in case of changes to the storage_service_key
// run the ExistsFunc beforehand to ensure the resource's existence nonetheless:

View File

@ -173,7 +173,7 @@ var CLOUDSTACK_NETWORK_2_OFFERING = os.Getenv("CLOUDSTACK_NETWORK_2_OFFERING")
// An IP address in CLOUDSTACK_NETWORK_2_CIDR
var CLOUDSTACK_NETWORK_2_IPADDRESS = os.Getenv("CLOUDSTACK_NETWORK_2_IPADDRESS")
// A network that already exists and isnt CLOUDSTACK_NETWORK_1
// A network that already exists and isn't CLOUDSTACK_NETWORK_1
var CLOUDSTACK_2ND_NIC_NETWORK = os.Getenv("CLOUDSTACK_2ND_NIC_NETWORK")
// An IP address in CLOUDSTACK_2ND_NIC_NETWORK

View File

@ -93,7 +93,7 @@ func resourceCloudStackDiskCreate(d *schema.ResourceData, meta interface{}) erro
p.SetSize(int64(d.Get("size").(int)))
}
// If there is a project supplied, we retreive and set the project id
// If there is a project supplied, we retrieve and set the project id
if project, ok := d.GetOk("project"); ok {
// Retrieve the project UUID
projectid, e := retrieveUUID(cs, "project", project.(string))

View File

@ -362,7 +362,7 @@ func resourceCloudStackEgressFirewallUpdate(d *schema.ResourceData, meta interfa
// Then loop through all the currently configured rules and create the new ones
for _, rule := range nrs.List() {
// When succesfully deleted, re-create it again if it still exists
// When successfully deleted, re-create it again if it still exists
err := resourceCloudStackEgressFirewallCreateRule(
d, meta, rule.(map[string]interface{}))

View File

@ -362,7 +362,7 @@ func resourceCloudStackFirewallUpdate(d *schema.ResourceData, meta interface{})
// Then loop through all the currently configured rules and create the new ones
for _, rule := range nrs.List() {
// When succesfully deleted, re-create it again if it still exists
// When successfully deleted, re-create it again if it still exists
err := resourceCloudStackFirewallCreateRule(
d, meta, rule.(map[string]interface{}))

View File

@ -153,7 +153,7 @@ func resourceCloudStackInstanceCreate(d *schema.ResourceData, meta interface{})
p.SetIpaddress(ipaddres.(string))
}
// If there is a project supplied, we retreive and set the project id
// If there is a project supplied, we retrieve and set the project id
if project, ok := d.GetOk("project"); ok {
// Retrieve the project UUID
projectid, e := retrieveUUID(cs, "project", project.(string))

View File

@ -74,7 +74,7 @@ func resourceCloudStackIPAddressCreate(d *schema.ResourceData, meta interface{})
p.SetVpcid(vpcid)
}
// If there is a project supplied, we retreive and set the project id
// If there is a project supplied, we retrieve and set the project id
if project, ok := d.GetOk("project"); ok {
// Retrieve the project UUID
projectid, e := retrieveUUID(cs, "project", project.(string))

View File

@ -125,7 +125,7 @@ func resourceCloudStackNetworkCreate(d *schema.ResourceData, meta interface{}) e
}
}
// If there is a project supplied, we retreive and set the project id
// If there is a project supplied, we retrieve and set the project id
if project, ok := d.GetOk("project"); ok {
// Retrieve the project UUID
projectid, e := retrieveUUID(cs, "project", project.(string))

View File

@ -417,7 +417,7 @@ func resourceCloudStackNetworkACLRuleUpdate(d *schema.ResourceData, meta interfa
// Then loop through all the currently configured rules and create the new ones
for _, rule := range nrs.List() {
// When succesfully deleted, re-create it again if it still exists
// When successfully deleted, re-create it again if it still exists
err := resourceCloudStackNetworkACLRuleCreateRule(d, meta, rule.(map[string]interface{}))
// We need to update this first to preserve the correct state

View File

@ -81,7 +81,7 @@ func resourceCloudStackVPCCreate(d *schema.ResourceData, meta interface{}) error
// Create a new parameter struct
p := cs.VPC.NewCreateVPCParams(d.Get("cidr").(string), displaytext.(string), name, vpcofferingid, zoneid)
// If there is a project supplied, we retreive and set the project id
// If there is a project supplied, we retrieve and set the project id
if project, ok := d.GetOk("project"); ok {
// Retrieve the project UUID
projectid, e := retrieveUUID(cs, "project", project.(string))

View File

@ -58,7 +58,7 @@ func resourceDigitalOceanDomainRead(d *schema.ResourceData, meta interface{}) er
domain, err := client.RetrieveDomain(d.Id())
if err != nil {
// If the domain is somehow already destroyed, mark as
// succesfully gone
// successfully gone
if strings.Contains(err.Error(), "404 Not Found") {
d.SetId("")
return nil

View File

@ -96,7 +96,7 @@ func resourceDigitalOceanRecordRead(d *schema.ResourceData, meta interface{}) er
rec, err := client.RetrieveRecord(domain, d.Id())
if err != nil {
// If the record is somehow already destroyed, mark as
// succesfully gone
// successfully gone
if strings.Contains(err.Error(), "404 Not Found") {
d.SetId("")
return nil
@ -152,7 +152,7 @@ func resourceDigitalOceanRecordDelete(d *schema.ResourceData, meta interface{})
err := client.DestroyRecord(d.Get("domain").(string), d.Id())
if err != nil {
// If the record is somehow already destroyed, mark as
// succesfully gone
// successfully gone
if strings.Contains(err.Error(), "404 Not Found") {
return nil
}

View File

@ -68,7 +68,7 @@ func resourceDigitalOceanSSHKeyRead(d *schema.ResourceData, meta interface{}) er
key, err := client.RetrieveSSHKey(d.Id())
if err != nil {
// If the key is somehow already destroyed, mark as
// succesfully gone
// successfully gone
if strings.Contains(err.Error(), "404 Not Found") {
d.SetId("")
return nil

View File

@ -97,7 +97,7 @@ func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface
log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink)
// Optimistic locking requires the fingerprint recieved to match
// Optimistic locking requires the fingerprint received to match
// the fingerprint we send the server, if there is a mismatch then we
// are working on old data, and must retry
err = resourceOperationWaitGlobal(config, op, "SetCommonMetadata")
@ -197,7 +197,7 @@ func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface
log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink)
// Optimistic locking requires the fingerprint recieved to match
// Optimistic locking requires the fingerprint received to match
// the fingerprint we send the server, if there is a mismatch then we
// are working on old data, and must retry
err = resourceOperationWaitGlobal(config, op, "SetCommonMetadata")

View File

@ -190,7 +190,7 @@ func resourceFWPolicyV1Delete(d *schema.ResourceData, meta interface{}) error {
return err
}
// This error usualy means that the policy is attached
// This error usually means that the policy is attached
// to a firewall. At this point, the firewall is probably
// being deleted. So, we retry a few times.

View File

@ -500,7 +500,7 @@ func jobToResourceData(job *rundeck.JobDetail, d *schema.ResourceData) error {
"decription": option.Description,
"required": option.IsRequired,
"allow_multiple_values": option.AllowsMultipleValues,
"multi_value_delimeter": option.MultiValueDelimiter,
"multi_value_delimiter": option.MultiValueDelimiter,
"obscure_input": option.ObscureInput,
"exposed_to_scripts": option.ValueIsExposedToScripts,
}

View File

@ -149,7 +149,7 @@ func (c *ApplyCommand) Run(args []string) int {
}
}
// Setup the state hook for continous state updates
// Setup the state hook for continuous state updates
{
state, err := c.State()
if err != nil {

View File

@ -74,7 +74,7 @@ func TestApply_destroy(t *testing.T) {
}
// Should have a backup file
f, err = os.Open(statePath + DefaultBackupExtention)
f, err = os.Open(statePath + DefaultBackupExtension)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -188,7 +188,7 @@ func TestApply_destroyTargeted(t *testing.T) {
}
// Should have a backup file
f, err = os.Open(statePath + DefaultBackupExtention)
f, err = os.Open(statePath + DefaultBackupExtension)
if err != nil {
t.Fatalf("err: %s", err)
}

View File

@ -599,7 +599,7 @@ func TestApply_refresh(t *testing.T) {
}
// Should have a backup file
f, err = os.Open(statePath + DefaultBackupExtention)
f, err = os.Open(statePath + DefaultBackupExtension)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -787,7 +787,7 @@ func TestApply_state(t *testing.T) {
}
// Should have a backup file
f, err = os.Open(statePath + DefaultBackupExtention)
f, err = os.Open(statePath + DefaultBackupExtension)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -1161,7 +1161,7 @@ func TestApply_disableBackup(t *testing.T) {
}
// Ensure there is no backup
_, err = os.Stat(statePath + DefaultBackupExtention)
_, err = os.Stat(statePath + DefaultBackupExtension)
if err == nil || !os.IsNotExist(err) {
t.Fatalf("backup should not exist")
}

View File

@ -19,8 +19,8 @@ const DefaultStateFilename = "terraform.tfstate"
// DefaultVarsFilename is the default filename used for vars
const DefaultVarsFilename = "terraform.tfvars"
// DefaultBackupExtention is added to the state file to form the path
const DefaultBackupExtention = ".backup"
// DefaultBackupExtension is added to the state file to form the path
const DefaultBackupExtension = ".backup"
// DefaultDataDirectory is the directory where local state is stored
// by default.

View File

@ -58,7 +58,7 @@ type Meta struct {
// be overriden.
//
// backupPath is used to backup the state file before writing a modified
// version. It defaults to stateOutPath + DefaultBackupExtention
// version. It defaults to stateOutPath + DefaultBackupExtension
statePath string
stateOutPath string
backupPath string
@ -74,7 +74,7 @@ func (m *Meta) initStatePaths() {
m.stateOutPath = m.statePath
}
if m.backupPath == "" {
m.backupPath = m.stateOutPath + DefaultBackupExtention
m.backupPath = m.stateOutPath + DefaultBackupExtension
}
}

View File

@ -190,7 +190,7 @@ func TestMeta_initStatePaths(t *testing.T) {
if m.stateOutPath != DefaultStateFilename {
t.Fatalf("bad: %#v", m)
}
if m.backupPath != DefaultStateFilename+DefaultBackupExtention {
if m.backupPath != DefaultStateFilename+DefaultBackupExtension {
t.Fatalf("bad: %#v", m)
}
@ -201,7 +201,7 @@ func TestMeta_initStatePaths(t *testing.T) {
if m.stateOutPath != "foo" {
t.Fatalf("bad: %#v", m)
}
if m.backupPath != "foo"+DefaultBackupExtention {
if m.backupPath != "foo"+DefaultBackupExtension {
t.Fatalf("bad: %#v", m)
}
@ -212,7 +212,7 @@ func TestMeta_initStatePaths(t *testing.T) {
if m.statePath != DefaultStateFilename {
t.Fatalf("bad: %#v", m)
}
if m.backupPath != "foo"+DefaultBackupExtention {
if m.backupPath != "foo"+DefaultBackupExtension {
t.Fatalf("bad: %#v", m)
}
}

View File

@ -88,7 +88,7 @@ func TestPlan_destroy(t *testing.T) {
}
}
f, err := os.Open(statePath + DefaultBackupExtention)
f, err := os.Open(statePath + DefaultBackupExtension)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -561,7 +561,7 @@ func TestPlan_disableBackup(t *testing.T) {
}
// Ensure there is no backup
_, err = os.Stat(statePath + DefaultBackupExtention)
_, err = os.Stat(statePath + DefaultBackupExtension)
if err == nil || !os.IsNotExist(err) {
t.Fatalf("backup should not exist")
}

View File

@ -261,7 +261,7 @@ Options:
automatically loaded if this flag is not specified.
-vcs=true If true (default), push will upload only files
comitted to your VCS, if detected.
committed to your VCS, if detected.
-no-color If specified, output won't contain any color.

View File

@ -202,7 +202,7 @@ func TestRefresh_defaultState(t *testing.T) {
t.Fatalf("bad: %#v", actual)
}
f, err = os.Open(statePath + DefaultBackupExtention)
f, err = os.Open(statePath + DefaultBackupExtension)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -286,7 +286,7 @@ func TestRefresh_outPath(t *testing.T) {
t.Fatalf("bad: %#v", actual)
}
f, err = os.Open(outPath + DefaultBackupExtention)
f, err = os.Open(outPath + DefaultBackupExtension)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -575,7 +575,7 @@ func TestRefresh_disableBackup(t *testing.T) {
}
// Ensure there is no backup
_, err = os.Stat(outPath + DefaultBackupExtention)
_, err = os.Stat(outPath + DefaultBackupExtension)
if err == nil || !os.IsNotExist(err) {
t.Fatalf("backup should not exist")
}

View File

@ -295,7 +295,7 @@ func (c *RemoteConfigCommand) enableRemoteState() int {
if backupPath != "-" {
// Provide default backup path if none provided
if backupPath == "" {
backupPath = c.conf.statePath + DefaultBackupExtention
backupPath = c.conf.statePath + DefaultBackupExtension
}
log.Printf("[INFO] Writing backup state to: %s", backupPath)

View File

@ -419,7 +419,7 @@ func testRemoteLocal(t *testing.T, exists bool) {
}
func testRemoteLocalBackup(t *testing.T, exists bool) {
_, err := os.Stat(DefaultStateFilename + DefaultBackupExtention)
_, err := os.Stat(DefaultStateFilename + DefaultBackupExtension)
if os.IsNotExist(err) && !exists {
return
}

View File

@ -150,7 +150,7 @@ func State(opts *StateOpts) (*StateResult, error) {
// If we have a result, make sure to back it up
if result.State != nil {
backupPath := result.StatePath + DefaultBackupExtention
backupPath := result.StatePath + DefaultBackupExtension
if opts.BackupPath != "" {
backupPath = opts.BackupPath
}
@ -194,7 +194,7 @@ func StateFromPlan(
// If we have a result, make sure to back it up
result = &state.BackupState{
Real: result,
Path: resultPath + DefaultBackupExtention,
Path: resultPath + DefaultBackupExtension,
}
return result, resultPath, nil

View File

@ -139,7 +139,7 @@ func (c *Communicator) Connect(o terraform.UIOutput) (err error) {
c.client = ssh.NewClient(sshConn, sshChan, req)
if c.config.sshAgent != nil {
log.Printf("[DEBUG] Telling SSH config to foward to agent")
log.Printf("[DEBUG] Telling SSH config to forward to agent")
if err := c.config.sshAgent.ForwardToAgent(c.client); err != nil {
return err
}

View File

@ -603,7 +603,7 @@ func loadProvisionersHcl(os *hclobj.Object, connInfo map[string]interface{}) ([]
return nil, err
}
// Delete the "connection" section, handle seperately
// Delete the "connection" section, handle separately
delete(config, "connection")
rawConfig, err := NewRawConfig(config)

View File

@ -791,7 +791,7 @@ func (m schemaMap) diffSet(
ns := n.(*Set)
// If the new value was set, compare the listCode's to determine if
// the two are equal. Comparing listCode's instead of the actuall values
// the two are equal. Comparing listCode's instead of the actual values
// is needed because there could be computed values in the set which
// would result in false positives while comparing.
if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) {

View File

@ -2920,7 +2920,7 @@ func TestSchemaMap_InternalValidate(t *testing.T) {
if tc.Err {
t.Fatalf("%d: Expected error did not occur:\n\n%#v", i, tc.In)
}
t.Fatalf("%d: Unexpected error occured:\n\n%#v", i, tc.In)
t.Fatalf("%d: Unexpected error occurred:\n\n%#v", i, tc.In)
}
}

View File

@ -41,7 +41,7 @@ func (d *Diff) AddModule(path []string) *ModuleDiff {
}
// ModuleByPath is used to lookup the module diff for the given path.
// This should be the prefered lookup mechanism as it allows for future
// This should be the preferred lookup mechanism as it allows for future
// lookup optimizations.
func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
if d == nil {
@ -459,7 +459,7 @@ func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
// This is a little tricky, but when a diff contains a computed list
// or set that can only be interpolated after the apply command has
// created the dependant resources, it could turn out that the result
// created the dependent resources, it could turn out that the result
// is actually the same as the existing state which would remove the
// key from the diff.
if diffOld.NewComputed && strings.HasSuffix(k, ".#") {

View File

@ -83,7 +83,7 @@ func (s *State) AddModule(path []string) *ModuleState {
}
// ModuleByPath is used to lookup the module state for the given path.
// This should be the prefered lookup mechanism as it allows for future
// This should be the preferred lookup mechanism as it allows for future
// lookup optimizations.
func (s *State) ModuleByPath(path []string) *ModuleState {
if s == nil {

View File

@ -95,7 +95,7 @@ func (t *FlattenTransformer) Transform(g *Graph) error {
g.ConnectDependent(sv)
}
// Re-connect all the things that dependend on the graph
// Re-connect all the things that depend on the graph
// we just flattened. This should connect them back into the
// correct nodes if their DependentOn() is setup correctly.
for _, v := range dependents {

View File

@ -67,7 +67,7 @@ The command-line flags are all optional. The list of available flags are:
* `-var-file=foo` - Set the value of variables using a variable file.
* `-vcs=true` - If true (default), then Terraform will detect if a VCS
is in use, such as Git, and will only upload files that are comitted to
is in use, such as Git, and will only upload files that are committed to
version control. If no version control system is detected, Terraform will
upload all files in `path` (parameter to the command).
@ -77,7 +77,7 @@ The files that are uploaded and packaged with a `push` are all the
files in the `path` given as the parameter to the command, recursively.
By default (unless `-vcs=false` is specified), Terraform will automatically
detect when a VCS such as Git is being used, and in that case will only
upload the files that are comitted. Because of this built-in intelligence,
upload the files that are committed. Because of this built-in intelligence,
you don't have to worry about excluding folders such as ".git" or ".hg" usually.
If Terraform doesn't detect a VCS, it will upload all files.

View File

@ -26,7 +26,7 @@ resource "aws_cloudwatch_metric_alarm" "foobar" {
}
```
## Example in Conjuction with Scaling Policies
## Example in Conjunction with Scaling Policies
```
resource "aws_autoscaling_policy" "bat" {
name = "foobar3-terraform-test"

View File

@ -115,7 +115,7 @@ The following arguments are supported:
* `domain_name` - (Optional) The name of an Active Directory domain to join.
* `domain_ou` - (Optional) Specifies the LDAP Organisational Unit to place the
* `domain_ou` - (Optional) Specifies the LDAP Organizational Unit to place the
instance in.
* `domain_username` - (Optional) The username of an account with permission to

View File

@ -71,7 +71,7 @@ resource "google_compute_autoscaler" "foobar" {
}
```
## Argument Refernce
## Argument Reference
The following arguments are supported:

View File

@ -27,7 +27,7 @@ resource "google_compute_instance_group_manager" "foobar" {
}
```
## Argument Refernce
## Argument Reference
The following arguments are supported:

View File

@ -113,7 +113,7 @@ The following arguments are supported:
from the set of predefined values. Defaults to `false`, meaning that the user may choose only
one value.
* `multi_value_delimeter`: (Optional) Delimeter used to join together multiple values into a single
* `multi_value_delimiter`: (Optional) Delimiter used to join together multiple values into a single
string when `allow_multiple_values` is set and the user chooses multiple values.
* `obscure_input`: (Optional) Boolean controlling whether the value of this option should be obscured

View File

@ -73,7 +73,7 @@ The following arguments are supported:
* `os_type (string)` - (Optional) The OS type of the node. Valid options are: `linux` and
`windows`. If not supplied the connection type will be used to determine the OS type (`ssh`
will asume `linux` and `winrm` will assume `windows`).
will assume `linux` and `winrm` will assume `windows`).
* `prevent_sudo (boolean)` - (Optional) Prevent the use of sudo while installing, configuring
and running the initial Chef Client run. This option is only used with `ssh` type