Sync the vendor folder with the manifest

A number of PRs have come through that modified the vendor folder
without recording the proper information in the manifest. This resets
everything back to the recorded versions.
James Bardin 2017-01-19 14:10:52 -05:00
parent 570af34f0f
commit a0b70b0ec7
235 changed files with 16280 additions and 3011 deletions

@@ -1 +0,0 @@
service_name: travis-ci

@@ -1,3 +0,0 @@
*.swp
*.cov
Godeps/_workspace

@@ -1,8 +0,0 @@
language: go
go:
- 1.5
install: make deps
script:
- make test

0 vendor/github.com/CenturyLinkCloud/clc-sdk/cover.sh generated vendored Normal file → Executable file

@@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
.idea
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.iml

@@ -1,13 +0,0 @@
language: go
go:
- 1.2
- 1.3
- 1.4
- tip
install: go get -v -t
notifications:
email:
- u@gogs.io

@@ -1,2 +0,0 @@
macaron.sublime-project
macaron.sublime-workspace

@@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

@@ -30,57 +30,12 @@ type JobDetail struct {
GroupName string `xml:"group,omitempty"`
ProjectName string `xml:"context>project,omitempty"`
OptionsConfig *JobOptions `xml:"context>options,omitempty"`
Description string `xml:"description"`
Description string `xml:"description,omitempty"`
LogLevel string `xml:"loglevel,omitempty"`
AllowConcurrentExecutions bool `xml:"multipleExecutions,omitempty"`
Dispatch *JobDispatch `xml:"dispatch,omitempty"`
AllowConcurrentExecutions bool `xml:"multipleExecutions"`
Dispatch *JobDispatch `xml:"dispatch"`
CommandSequence *JobCommandSequence `xml:"sequence,omitempty"`
Timeout string `xml:"timeout,omitempty"`
Retry string `xml:"retry,omitempty"`
NodeFilter *JobNodeFilter `xml:"nodefilters,omitempty"`
/* If Dispatch is enabled, nodesSelectedByDefault is always present with true/false.
 * For this reason omitempty cannot be used.
 * This has to be handled by the user.
 */
NodesSelectedByDefault bool `xml:"nodesSelectedByDefault"`
Schedule *JobSchedule `xml:"schedule,omitempty"`
}
type JobSchedule struct {
XMLName xml.Name `xml:"schedule"`
DayOfMonth *JobScheduleDayOfMonth `xml:"dayofmonth,omitempty"`
Time JobScheduleTime `xml:"time"`
Month JobScheduleMonth `xml:"month"`
WeekDay *JobScheduleWeekDay `xml:"weekday,omitempty"`
Year JobScheduleYear `xml:"year"`
}
type JobScheduleDayOfMonth struct {
XMLName xml.Name `xml:"dayofmonth"`
}
type JobScheduleMonth struct {
XMLName xml.Name `xml:"month"`
Day string `xml:"day,attr,omitempty"`
Month string `xml:"month,attr"`
}
type JobScheduleYear struct {
XMLName xml.Name `xml:"year"`
Year string `xml:"year,attr"`
}
type JobScheduleWeekDay struct {
XMLName xml.Name `xml:"weekday"`
Day string `xml:"day,attr"`
}
type JobScheduleTime struct {
XMLName xml.Name `xml:"time"`
Hour string `xml:"hour,attr"`
Minute string `xml:"minute,attr"`
Seconds string `xml:"seconds,attr"`
}
type jobDetailList struct {
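The NodesSelectedByDefault caveat above is easy to demonstrate. A minimal, self-contained sketch (the struct names here are hypothetical; only encoding/xml is involved) of why omitempty cannot be used for a boolean that must always serialize:

package main

import (
	"encoding/xml"
	"fmt"
)

type withOmitEmpty struct {
	XMLName  xml.Name `xml:"job"`
	Selected bool     `xml:"nodesSelectedByDefault,omitempty"`
}

type withoutOmitEmpty struct {
	XMLName  xml.Name `xml:"job"`
	Selected bool     `xml:"nodesSelectedByDefault"`
}

func main() {
	a, _ := xml.Marshal(withOmitEmpty{})
	b, _ := xml.Marshal(withoutOmitEmpty{})
	fmt.Println(string(a)) // <job></job>: a false value is silently dropped
	fmt.Println(string(b)) // <job><nodesSelectedByDefault>false</nodesSelectedByDefault></job>
}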
@@ -98,40 +53,13 @@ type JobOptions struct {
type JobOption struct {
XMLName xml.Name `xml:"option"`
// If AllowsMultipleValues is set, the string that will be used to delimit the multiple
// chosen options.
MultiValueDelimiter string `xml:"delimiter,attr,omitempty"`
// If set, Rundeck will reject values that are not in the set of predefined choices.
RequirePredefinedChoice bool `xml:"enforcedvalues,attr,omitempty"`
// When either ValueChoices or ValueChoicesURL is set, controls whether more than one
// choice may be selected as the value.
AllowsMultipleValues bool `xml:"multivalued,attr,omitempty"`
// The name of the option, which can be used to interpolate its value
// into job commands.
Name string `xml:"name,attr,omitempty"`
// Regular expression to be used to validate the option value.
ValidationRegex string `xml:"regex,attr,omitempty"`
// If set, Rundeck requires a value to be set for this option.
IsRequired bool `xml:"required,attr,omitempty"`
// If set, the input for this field will be obscured in the UI. Useful for passwords
// and other secrets.
ObscureInput bool `xml:"secure,attr,omitempty"`
// If ObscureInput is set, StoragePath can be used to point to stored credentials.
StoragePath string `xml:"storagePath,attr,omitempty"`
// The default value of the option.
DefaultValue string `xml:"value,attr,omitempty"`
// If set, the value can be accessed from scripts.
ValueIsExposedToScripts bool `xml:"valueExposed,attr,omitempty"`
// A sequence of predefined choices for this option. Mutually exclusive with ValueChoicesURL.
ValueChoices JobValueChoices `xml:"values,attr"`
@@ -139,10 +67,33 @@ type JobOption struct {
// Mutually exclusive with ValueChoices
ValueChoicesURL string `xml:"valuesUrl,attr,omitempty"`
// If set, Rundeck will reject values that are not in the set of predefined choices.
RequirePredefinedChoice bool `xml:"enforcedvalues,attr,omitempty"`
// Regular expression to be used to validate the option value.
ValidationRegex string `xml:"regex,attr,omitempty"`
// Description of the value to be shown in the Rundeck UI.
Description string `xml:"description,omitempty"`
}
// If set, Rundeck requires a value to be set for this option.
IsRequired bool `xml:"required,attr,omitempty"`
// When either ValueChoices or ValueChoicesURL is set, controls whether more than one
// choice may be selected as the value.
AllowsMultipleValues bool `xml:"multivalued,attr,omitempty"`
// If AllowsMultipleValues is set, the string that will be used to delimit the multiple
// chosen options.
MultiValueDelimiter string `xml:"delimiter,attr,omitempty"`
// If set, the input for this field will be obscured in the UI. Useful for passwords
// and other secrets.
ObscureInput bool `xml:"secure,attr,omitempty"`
// If set, the value can be accessed from scripts.
ValueIsExposedToScripts bool `xml:"valueExposed,attr,omitempty"`
}
// JobValueChoices is a specialization of []string representing a sequence of predefined values
// for a job option.
@@ -161,9 +112,6 @@ type JobCommandSequence struct {
// Sequence of commands to run in the sequence.
Commands []JobCommand `xml:"command"`
// Description
Description string `xml:"description,omitempty"`
}
// JobCommand describes a particular command to run within the sequence of commands on a job.
@@ -172,21 +120,9 @@ type JobCommandSequence struct {
type JobCommand struct {
XMLName xml.Name
// If the workflow's keepgoing is false, this allows the workflow to continue when the error handler succeeds.
ContinueOnError bool `xml:"keepgoingOnSuccess,attr,omitempty"`
// Description
Description string `xml:"description,omitempty"`
// On error:
ErrorHandler *JobCommand `xml:"errorhandler,omitempty"`
// A literal shell command to run.
ShellCommand string `xml:"exec,omitempty"`
// Add extension to the temporary filename.
FileExtension string `xml:"fileExtension,omitempty"`
// An inline program to run. This will be written to disk and executed, so if it is
// a shell script it should have an appropriate #! line.
Script string `xml:"script,omitempty"`
@@ -197,9 +133,6 @@ type JobCommand struct {
// When ScriptFile is set, the arguments to provide to the script when executing it.
ScriptFileArgs string `xml:"scriptargs,omitempty"`
// ScriptInterpreter is the interpreter used to execute the inline Script or ScriptFile.
ScriptInterpreter *JobCommandScriptInterpreter `xml:"scriptinterpreter,omitempty"`
// A reference to another job to run as this command.
Job *JobCommandJobRef `xml:"jobref"`
@@ -210,20 +143,12 @@ type JobCommand struct {
NodeStepPlugin *JobPlugin `xml:"node-step-plugin"`
}
// (Inline) Script interpreter
type JobCommandScriptInterpreter struct {
XMLName xml.Name `xml:"scriptinterpreter"`
InvocationString string `xml:",chardata"`
ArgsQuoted bool `xml:"argsquoted,attr,omitempty"`
}
// JobCommandJobRef is a reference to another job that will run as one of the commands of a job.
type JobCommandJobRef struct {
XMLName xml.Name `xml:"jobref"`
Name string `xml:"name,attr"`
GroupName string `xml:"group,attr"`
RunForEachNode bool `xml:"nodeStep,attr"`
NodeFilter *JobNodeFilter `xml:"nodefilters,omitempty"`
Arguments JobCommandJobRefArguments `xml:"arg"`
}

@@ -1,314 +0,0 @@
package rundeck
import (
"fmt"
"testing"
)
func TestUnmarshalJobDetail(t *testing.T) {
testUnmarshalXML(t, []unmarshalTest{
unmarshalTest{
"with-config",
`<job><uuid>baz</uuid><dispatch><rankOrder>ascending</rankOrder></dispatch></job>`,
&JobDetail{},
func (rv interface {}) error {
v := rv.(*JobDetail)
if v.ID != "baz" {
return fmt.Errorf("got ID %s, but expecting baz", v.ID)
}
if v.Dispatch.RankOrder != "ascending" {
return fmt.Errorf("Dispatch.RankOrder = \"%v\", but expecting \"ascending\"", v.Dispatch.RankOrder)
}
return nil
},
},
unmarshalTest{
"with-empty-config",
`<JobPlugin type="foo-plugin"><configuration/></JobPlugin>`,
&JobPlugin{},
func (rv interface {}) error {
v := rv.(*JobPlugin)
if v.Type != "foo-plugin" {
return fmt.Errorf("got Type %s, but expecting foo-plugin", v.Type)
}
if len(v.Config) != 0 {
return fmt.Errorf("got %i Config values, but expecting 0", len(v.Config))
}
return nil
},
},
})
}
func TestMarshalJobPlugin(t *testing.T) {
testMarshalXML(t, []marshalTest{
marshalTest{
"with-config",
JobPlugin{
Type: "foo-plugin",
Config: map[string]string{
"woo": "foo",
"bar": "baz",
},
},
`<JobPlugin type="foo-plugin"><configuration><entry key="bar" value="baz"></entry><entry key="woo" value="foo"></entry></configuration></JobPlugin>`,
},
marshalTest{
"with-empty-config",
JobPlugin{
Type: "foo-plugin",
Config: map[string]string{},
},
`<JobPlugin type="foo-plugin"></JobPlugin>`,
},
marshalTest{
"with-zero-value-config",
JobPlugin{
Type: "foo-plugin",
},
`<JobPlugin type="foo-plugin"></JobPlugin>`,
},
})
}
func TestUnmarshalJobPlugin(t *testing.T) {
testUnmarshalXML(t, []unmarshalTest{
unmarshalTest{
"with-config",
`<JobPlugin type="foo-plugin"><configuration><entry key="woo" value="foo"/><entry key="bar" value="baz"/></configuration></JobPlugin>`,
&JobPlugin{},
func (rv interface {}) error {
v := rv.(*JobPlugin)
if v.Type != "foo-plugin" {
return fmt.Errorf("got Type %s, but expecting foo-plugin", v.Type)
}
if len(v.Config) != 2 {
return fmt.Errorf("got %v Config values, but expecting 2", len(v.Config))
}
if v.Config["woo"] != "foo" {
return fmt.Errorf("Config[\"woo\"] = \"%s\", but expecting \"foo\"", v.Config["woo"])
}
if v.Config["bar"] != "baz" {
return fmt.Errorf("Config[\"bar\"] = \"%s\", but expecting \"baz\"", v.Config["bar"])
}
return nil
},
},
unmarshalTest{
"with-empty-config",
`<JobPlugin type="foo-plugin"><configuration/></JobPlugin>`,
&JobPlugin{},
func (rv interface {}) error {
v := rv.(*JobPlugin)
if v.Type != "foo-plugin" {
return fmt.Errorf("got Type %s, but expecting foo-plugin", v.Type)
}
if len(v.Config) != 0 {
return fmt.Errorf("got %i Config values, but expecting 0", len(v.Config))
}
return nil
},
},
})
}
func TestMarshalJobCommand(t *testing.T) {
testMarshalXML(t, []marshalTest{
marshalTest{
"with-shell",
JobCommand{
ShellCommand: "command",
},
`<JobCommand><exec>command</exec></JobCommand>`,
},
marshalTest{
"with-script",
JobCommand{
Script: "script",
},
`<JobCommand><script>script</script></JobCommand>`,
},
marshalTest{
"with-script-interpreter",
JobCommand{
FileExtension: "sh",
Script: "Hello World!",
ScriptInterpreter: &JobCommandScriptInterpreter{
InvocationString: "sudo",
},
},
`<JobCommand><fileExtension>sh</fileExtension><script>Hello World!</script><scriptinterpreter>sudo</scriptinterpreter></JobCommand>`,
},
})
}
func TestUnmarshalJobCommand(t *testing.T) {
testUnmarshalXML(t, []unmarshalTest{
unmarshalTest{
"with-shell",
`<JobCommand><exec>command</exec></JobCommand>`,
&JobCommand{},
func (rv interface {}) error {
v := rv.(*JobCommand)
if v.ShellCommand != "command" {
return fmt.Errorf("got ShellCommand %s, but expecting command", v.ShellCommand)
}
return nil
},
},
unmarshalTest{
"with-script",
`<JobCommand><script>script</script></JobCommand>`,
&JobCommand{},
func (rv interface {}) error {
v := rv.(*JobCommand)
if v.Script != "script" {
return fmt.Errorf("got Script %s, but expecting script", v.Script)
}
return nil
},
},
unmarshalTest{
"with-script-interpreter",
`<JobCommand><script>Hello World!</script><fileExtension>sh</fileExtension><scriptinterpreter>sudo</scriptinterpreter></JobCommand>`,
&JobCommand{},
func (rv interface {}) error {
v := rv.(*JobCommand)
if v.FileExtension != "sh" {
return fmt.Errorf("got FileExtension %s, but expecting sh", v.FileExtension)
}
if v.Script != "Hello World!" {
return fmt.Errorf("got Script %s, but expecting Hello World!", v.Script)
}
if v.ScriptInterpreter == nil {
return fmt.Errorf("got %s, but expecting not nil", v.ScriptInterpreter)
}
if v.ScriptInterpreter.InvocationString != "sudo" {
return fmt.Errorf("got InvocationString %s, but expecting sudo", v.ScriptInterpreter.InvocationString)
}
return nil
},
},
})
}
func TestMarshalScriptInterpreter(t *testing.T) {
testMarshalXML(t, []marshalTest{
marshalTest{
"with-script-interpreter",
JobCommandScriptInterpreter{
InvocationString: "sudo",
},
`<scriptinterpreter>sudo</scriptinterpreter>`,
},
marshalTest{
"with-script-interpreter-quoted",
JobCommandScriptInterpreter{
ArgsQuoted: true,
InvocationString: "sudo",
},
`<scriptinterpreter argsquoted="true">sudo</scriptinterpreter>`,
},
})
}
func TestUnmarshalScriptInterpreter(t *testing.T) {
testUnmarshalXML(t, []unmarshalTest{
unmarshalTest{
"with-script-interpreter",
`<scriptinterpreter>sudo</scriptinterpreter>`,
&JobCommandScriptInterpreter{},
func (rv interface {}) error {
v := rv.(*JobCommandScriptInterpreter)
if v.InvocationString != "sudo" {
return fmt.Errorf("got InvocationString %s, but expecting sudo", v.InvocationString)
}
if v.ArgsQuoted {
return fmt.Errorf("got ArgsQuoted %s, but expecting false", v.ArgsQuoted)
}
return nil
},
},
unmarshalTest{
"with-script-interpreter-quoted",
`<scriptinterpreter argsquoted="true">sudo</scriptinterpreter>`,
&JobCommandScriptInterpreter{},
func (rv interface {}) error {
v := rv.(*JobCommandScriptInterpreter)
if v.InvocationString != "sudo" {
return fmt.Errorf("got InvocationString %s, but expecting sudo", v.InvocationString)
}
if ! v.ArgsQuoted {
return fmt.Errorf("got ArgsQuoted %s, but expecting true", v.ArgsQuoted)
}
return nil
},
},
})
}
func TestMarshalErrorHandler(t *testing.T) {
testMarshalXML(t, []marshalTest{
marshalTest{
"with-errorhandler",
JobCommandSequence{
ContinueOnError: true,
OrderingStrategy: "step-first",
Commands: []JobCommand{
JobCommand{
Script: "inline_script",
ErrorHandler: &JobCommand{
ContinueOnError: true,
Script: "error_script",
},
},
},
},
`<sequence keepgoing="true" strategy="step-first"><command><errorhandler keepgoingOnSuccess="true"><script>error_script</script></errorhandler><script>inline_script</script></command></sequence>`,
},
})
}
func TestMarshalJobOption(t *testing.T) {
testMarshalXML(t, []marshalTest{
marshalTest{
"with-option-basic",
JobOption{
Name: "basic",
},
`<option name="basic"></option>`,
},
marshalTest{
"with-option-multivalued",
JobOption{
Name: "Multivalued",
MultiValueDelimiter: "|",
RequirePredefinedChoice: true,
AllowsMultipleValues: true,
IsRequired: true,
ValueChoices: JobValueChoices([]string{"myValues"}),
},
`<option delimiter="|" enforcedvalues="true" multivalued="true" name="Multivalued" required="true" values="myValues"></option>`,
},
marshalTest{
"with-all-attributes",
JobOption{
Name: "advanced",
MultiValueDelimiter: "|",
RequirePredefinedChoice: true,
AllowsMultipleValues: true,
ValidationRegex: ".+",
IsRequired: true,
ObscureInput: true,
StoragePath: "myKey",
DefaultValue: "myValue",
ValueIsExposedToScripts: true,
ValueChoices: JobValueChoices([]string{"myValues"}),
ValueChoicesURL: "myValuesUrl",
},
`<option delimiter="|" enforcedvalues="true" multivalued="true" name="advanced" regex=".+" required="true" secure="true" storagePath="myKey" value="myValue" valueExposed="true" values="myValues" valuesUrl="myValuesUrl"></option>`,
},
})
}

@@ -1,22 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

0 vendor/github.com/armon/circbuf/LICENSE generated vendored Normal file → Executable file

@@ -1,22 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

@@ -1,3 +0,0 @@
language: go
go:
- tip

@@ -0,0 +1,70 @@
// Package endpoints validates regional endpoints for services.
package endpoints
//go:generate go run -tags codegen ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
//go:generate gofmt -s -w endpoints_map.go
import (
"fmt"
"regexp"
"strings"
)
// NormalizeEndpoint takes an endpoint and service API information to return a
// normalized endpoint and signing region. If the endpoint is an empty string,
// the service name and region will be used to look up the service's API endpoint.
// If the endpoint is provided, the scheme will be added if it is not present.
func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL, useDualStack bool) (normEndpoint, signingRegion string) {
if endpoint == "" {
return EndpointForRegion(serviceName, region, disableSSL, useDualStack)
}
return AddScheme(endpoint, disableSSL), ""
}
// EndpointForRegion returns an endpoint and its signing region for a service and region.
// If the service and region pair is not found, endpoint and signingRegion will be empty.
func EndpointForRegion(svcName, region string, disableSSL, useDualStack bool) (endpoint, signingRegion string) {
dualStackField := ""
if useDualStack {
dualStackField = "/dualstack"
}
derivedKeys := []string{
region + "/" + svcName + dualStackField,
region + "/*" + dualStackField,
"*/" + svcName + dualStackField,
"*/*" + dualStackField,
}
for _, key := range derivedKeys {
if val, ok := endpointsMap.Endpoints[key]; ok {
ep := val.Endpoint
ep = strings.Replace(ep, "{region}", region, -1)
ep = strings.Replace(ep, "{service}", svcName, -1)
endpoint = ep
signingRegion = val.SigningRegion
break
}
}
return AddScheme(endpoint, disableSSL), signingRegion
}
// Regular expression to determine if the endpoint string is prefixed with a scheme.
var schemeRE = regexp.MustCompile("^([^:]+)://")
// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
// scheme. If disableSSL is true, HTTP will be added instead of the default HTTPS.
func AddScheme(endpoint string, disableSSL bool) string {
if endpoint != "" && !schemeRE.MatchString(endpoint) {
scheme := "https"
if disableSSL {
scheme = "http"
}
endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
}
return endpoint
}
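For reference, a standalone sketch of the lookup behavior above: it reproduces the derived-key order and the scheme handling (dual stack omitted; the region and service values are illustrative, and addScheme is a local copy of AddScheme, not the package function).

package main

import (
	"fmt"
	"regexp"
)

var schemeRE = regexp.MustCompile("^([^:]+)://")

// addScheme mirrors AddScheme above.
func addScheme(endpoint string, disableSSL bool) string {
	if endpoint != "" && !schemeRE.MatchString(endpoint) {
		scheme := "https"
		if disableSSL {
			scheme = "http"
		}
		endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
	}
	return endpoint
}

func main() {
	region, svc := "us-west-2", "s3"
	// Keys are tried from most specific to least specific.
	for _, key := range []string{
		region + "/" + svc,
		region + "/*",
		"*/" + svc,
		"*/*",
	} {
		fmt.Println(key)
	}
	// "*/s3" maps to "s3-{region}.amazonaws.com" in endpoints.json, so the
	// resolved, scheme-qualified endpoint becomes:
	fmt.Println(addScheme("s3-us-west-2.amazonaws.com", false)) // https://s3-us-west-2.amazonaws.com
}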

@@ -0,0 +1,82 @@
{
"version": 2,
"endpoints": {
"*/*": {
"endpoint": "{service}.{region}.amazonaws.com"
},
"cn-north-1/*": {
"endpoint": "{service}.{region}.amazonaws.com.cn",
"signatureVersion": "v4"
},
"cn-north-1/ec2metadata": {
"endpoint": "http://169.254.169.254/latest"
},
"us-gov-west-1/iam": {
"endpoint": "iam.us-gov.amazonaws.com"
},
"us-gov-west-1/sts": {
"endpoint": "sts.us-gov-west-1.amazonaws.com"
},
"us-gov-west-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"us-gov-west-1/ec2metadata": {
"endpoint": "http://169.254.169.254/latest"
},
"*/budgets": {
"endpoint": "budgets.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/cloudfront": {
"endpoint": "cloudfront.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/cloudsearchdomain": {
"endpoint": "",
"signingRegion": "us-east-1"
},
"*/data.iot": {
"endpoint": "",
"signingRegion": "us-east-1"
},
"*/ec2metadata": {
"endpoint": "http://169.254.169.254/latest"
},
"*/iam": {
"endpoint": "iam.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/importexport": {
"endpoint": "importexport.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/route53": {
"endpoint": "route53.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/sts": {
"endpoint": "sts.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/waf": {
"endpoint": "waf.amazonaws.com",
"signingRegion": "us-east-1"
},
"us-east-1/sdb": {
"endpoint": "sdb.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"*/s3/dualstack": {
"endpoint": "s3.dualstack.{region}.amazonaws.com"
},
"us-east-1/s3": {
"endpoint": "s3.amazonaws.com"
},
"eu-central-1/s3": {
"endpoint": "{service}.{region}.amazonaws.com"
}
}
}

@@ -0,0 +1,95 @@
package endpoints
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
type endpointStruct struct {
Version int
Endpoints map[string]endpointEntry
}
type endpointEntry struct {
Endpoint string
SigningRegion string
}
var endpointsMap = endpointStruct{
Version: 2,
Endpoints: map[string]endpointEntry{
"*/*": {
Endpoint: "{service}.{region}.amazonaws.com",
},
"*/budgets": {
Endpoint: "budgets.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/cloudfront": {
Endpoint: "cloudfront.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/cloudsearchdomain": {
Endpoint: "",
SigningRegion: "us-east-1",
},
"*/data.iot": {
Endpoint: "",
SigningRegion: "us-east-1",
},
"*/ec2metadata": {
Endpoint: "http://169.254.169.254/latest",
},
"*/iam": {
Endpoint: "iam.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/importexport": {
Endpoint: "importexport.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/route53": {
Endpoint: "route53.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"*/s3/dualstack": {
Endpoint: "s3.dualstack.{region}.amazonaws.com",
},
"*/sts": {
Endpoint: "sts.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/waf": {
Endpoint: "waf.amazonaws.com",
SigningRegion: "us-east-1",
},
"cn-north-1/*": {
Endpoint: "{service}.{region}.amazonaws.com.cn",
},
"cn-north-1/ec2metadata": {
Endpoint: "http://169.254.169.254/latest",
},
"eu-central-1/s3": {
Endpoint: "{service}.{region}.amazonaws.com",
},
"us-east-1/s3": {
Endpoint: "s3.amazonaws.com",
},
"us-east-1/sdb": {
Endpoint: "sdb.amazonaws.com",
SigningRegion: "us-east-1",
},
"us-gov-west-1/ec2metadata": {
Endpoint: "http://169.254.169.254/latest",
},
"us-gov-west-1/iam": {
Endpoint: "iam.us-gov.amazonaws.com",
},
"us-gov-west-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"us-gov-west-1/sts": {
Endpoint: "sts.us-gov-west-1.amazonaws.com",
},
},
}

@@ -1,2 +0,0 @@
example/example
example/example.exe

@@ -1,22 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

@@ -1,2 +0,0 @@
language: go
go: 1.3.3

@@ -1,178 +0,0 @@
package archive_test
import (
"bytes"
"fmt"
"io"
"testing"
"github.com/cihub/seelog/archive"
"github.com/cihub/seelog/archive/gzip"
"github.com/cihub/seelog/archive/tar"
"github.com/cihub/seelog/archive/zip"
"github.com/cihub/seelog/io/iotest"
)
const (
gzipType = "gzip"
tarType = "tar"
zipType = "zip"
)
var types = []string{gzipType, tarType, zipType}
type file struct {
name string
contents []byte
}
var (
oneFile = []file{
{
name: "file1",
contents: []byte("This is a single log."),
},
}
twoFiles = []file{
{
name: "file1",
contents: []byte("This is a log."),
},
{
name: "file2",
contents: []byte("This is another log."),
},
}
)
type testCase struct {
srcType, dstType string
in []file
}
func copyTests() map[string]testCase {
// types X types X files
tests := make(map[string]testCase, len(types)*len(types)*2)
for _, srct := range types {
for _, dstt := range types {
tests[fmt.Sprintf("%s to %s: one file", srct, dstt)] = testCase{
srcType: srct,
dstType: dstt,
in: oneFile,
}
// gzip does not handle more than one file
if srct != gzipType && dstt != gzipType {
tests[fmt.Sprintf("%s to %s: two files", srct, dstt)] = testCase{
srcType: srct,
dstType: dstt,
in: twoFiles,
}
}
}
}
return tests
}
func TestCopy(t *testing.T) {
srcb, dstb := new(bytes.Buffer), new(bytes.Buffer)
for tname, tt := range copyTests() {
// Reset buffers between tests
srcb.Reset()
dstb.Reset()
// Last file name (needed for gzip.NewReader)
var fname string
// Seed the src
srcw := writer(t, tname, srcb, tt.srcType)
for _, f := range tt.in {
srcw.NextFile(f.name, iotest.FileInfo(t, f.contents))
mustCopy(t, tname, srcw, bytes.NewReader(f.contents))
fname = f.name
}
mustClose(t, tname, srcw)
// Perform the copy
srcr := reader(t, tname, srcb, tt.srcType, fname)
dstw := writer(t, tname, dstb, tt.dstType)
if err := archive.Copy(dstw, srcr); err != nil {
t.Fatalf("%s: %v", tname, err)
}
srcr.Close() // Read-only
mustClose(t, tname, dstw)
// Read back dst to confirm our expectations
dstr := reader(t, tname, dstb, tt.dstType, fname)
for _, want := range tt.in {
buf := new(bytes.Buffer)
name, err := dstr.NextFile()
if err != nil {
t.Fatalf("%s: %v", tname, err)
}
mustCopy(t, tname, buf, dstr)
got := file{
name: name,
contents: buf.Bytes(),
}
switch {
case got.name != want.name:
t.Errorf("%s: got file %q but want file %q",
tname, got.name, want.name)
case !bytes.Equal(got.contents, want.contents):
t.Errorf("%s: mismatched contents in %q: got %q but want %q",
tname, got.name, got.contents, want.contents)
}
}
dstr.Close()
}
}
func writer(t *testing.T, tname string, w io.Writer, atype string) archive.WriteCloser {
switch atype {
case gzipType:
return gzip.NewWriter(w)
case tarType:
return tar.NewWriter(w)
case zipType:
return zip.NewWriter(w)
}
t.Fatalf("%s: unrecognized archive type: %s", tname, atype)
panic("execution continued after (*testing.T).Fatalf")
}
func reader(t *testing.T, tname string, buf *bytes.Buffer, atype string, fname string) archive.ReadCloser {
switch atype {
case gzipType:
gr, err := gzip.NewReader(buf, fname)
if err != nil {
t.Fatalf("%s: %v", tname, err)
}
return gr
case tarType:
return archive.NopCloser(tar.NewReader(buf))
case zipType:
zr, err := zip.NewReader(
bytes.NewReader(buf.Bytes()),
int64(buf.Len()))
if err != nil {
t.Fatalf("%s: new zip reader: %v", tname, err)
}
return archive.NopCloser(zr)
}
t.Fatalf("%s: unrecognized archive type: %s", tname, atype)
panic("execution continued after (*testing.T).Fatalf")
}
func mustCopy(t *testing.T, tname string, dst io.Writer, src io.Reader) {
if _, err := io.Copy(dst, src); err != nil {
t.Fatalf("%s: copy: %v", tname, err)
}
}
func mustClose(t *testing.T, tname string, c io.Closer) {
if err := c.Close(); err != nil {
t.Fatalf("%s: close: %v", tname, err)
}
}
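The round-trip test above doubles as usage documentation for the archive package. A hedged sketch, using only calls exercised by the test (TarToZip and its package name are hypothetical), of converting an in-memory tar archive to zip:

package archiveutil

import (
	"bytes"

	"github.com/cihub/seelog/archive"
	"github.com/cihub/seelog/archive/tar"
	"github.com/cihub/seelog/archive/zip"
)

// TarToZip copies every entry of an in-memory tar archive into a zip archive.
func TarToZip(tarBuf, zipBuf *bytes.Buffer) error {
	srcr := archive.NopCloser(tar.NewReader(tarBuf)) // tar readers need no file name
	defer srcr.Close()
	dstw := zip.NewWriter(zipBuf)
	if err := archive.Copy(dstw, srcr); err != nil {
		return err
	}
	return dstw.Close() // the zip central directory is written on Close
}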

@@ -1,104 +0,0 @@
package tar_test
import (
"bytes"
"io"
"io/ioutil"
"os"
"testing"
"github.com/cihub/seelog/archive/tar"
"github.com/cihub/seelog/io/iotest"
)
type file struct {
name string
contents []byte
}
var tarTests = map[string]struct{ want []file }{
"one file": {
want: []file{
{
name: "file",
contents: []byte("I am a log file"),
},
},
},
"multiple files": {
want: []file{
{
name: "file1",
contents: []byte("I am log file 1"),
},
{
name: "file2",
contents: []byte("I am log file 2"),
},
},
},
}
func TestWriterAndReader(t *testing.T) {
for tname, tt := range tarTests {
f, cleanup := iotest.TempFile(t)
defer cleanup()
writeFiles(t, f, tname, tt.want)
readFiles(t, f, tname, tt.want)
}
}
// writeFiles iterates through the files we want and writes them as a tarred
// file.
func writeFiles(t *testing.T, f *os.File, tname string, want []file) {
w := tar.NewWriter(f)
defer w.Close()
// Write tarred files
for _, fwant := range want {
fi := iotest.FileInfo(t, fwant.contents)
// Write the file
err := w.NextFile(fwant.name, fi)
switch err {
case io.EOF:
break
default:
t.Fatalf("%s: write header for next file: %v", tname, err)
case nil: // Proceed below
}
if _, err := io.Copy(w, bytes.NewReader(fwant.contents)); err != nil {
t.Fatalf("%s: copy to writer: %v", tname, err)
}
}
}
// readFiles iterates through tarred files and ensures they are the same.
func readFiles(t *testing.T, f *os.File, tname string, want []file) {
r := tar.NewReader(f)
for _, fwant := range want {
fname, err := r.NextFile()
switch err {
case io.EOF:
return
default:
t.Fatalf("%s: read header for next file: %v", tname, err)
case nil: // Proceed below
}
if fname != fwant.name {
t.Fatalf("%s: incorrect file name: got %q but want %q", tname, fname, fwant.name)
continue
}
gotContents, err := ioutil.ReadAll(r)
if err != nil {
t.Fatalf("%s: read file: %v", tname, err)
}
if !bytes.Equal(gotContents, fwant.contents) {
t.Errorf("%s: %q = %q but want %q", tname, fname, gotContents, fwant.contents)
}
}
}

@@ -1,99 +0,0 @@
package zip_test
import (
"bytes"
"io"
"io/ioutil"
"os"
"testing"
"github.com/cihub/seelog/archive/zip"
"github.com/cihub/seelog/io/iotest"
)
var zipTests = map[string]struct{ want map[string][]byte }{
"one file": {
want: map[string][]byte{
"file": []byte("I am a log file"),
},
},
"multiple files": {
want: map[string][]byte{
"file1": []byte("I am log file 1"),
"file2": []byte("I am log file 2"),
},
},
}
func TestWriterAndReader(t *testing.T) {
for tname, tt := range zipTests {
f, cleanup := iotest.TempFile(t)
defer cleanup()
writeFiles(t, f, tname, tt.want)
readFiles(t, f, tname, tt.want)
}
}
// writeFiles iterates through the files we want and writes them as a zipped
// file.
func writeFiles(t *testing.T, f *os.File, tname string, want map[string][]byte) {
w := zip.NewWriter(f)
defer w.Close()
// Write zipped files
for fname, fbytes := range want {
fi := iotest.FileInfo(t, fbytes)
// Write the file
err := w.NextFile(fname, fi)
switch err {
case io.EOF:
break
default:
t.Fatalf("%s: write header for next file: %v", tname, err)
case nil: // Proceed below
}
if _, err := io.Copy(w, bytes.NewReader(fbytes)); err != nil {
t.Fatalf("%s: copy to writer: %v", tname, err)
}
}
}
// readFiles iterates through zipped files and ensures they are the same.
func readFiles(t *testing.T, f *os.File, tname string, want map[string][]byte) {
// Get zip Reader
fi, err := f.Stat()
if err != nil {
t.Fatalf("%s: stat zipped file: %v", tname, err)
}
r, err := zip.NewReader(f, fi.Size())
if err != nil {
t.Fatalf("%s: %v", tname, err)
}
for {
fname, err := r.NextFile()
switch err {
case io.EOF:
return
default:
t.Fatalf("%s: read header for next file: %v", tname, err)
case nil: // Proceed below
}
wantBytes, ok := want[fname]
if !ok {
t.Errorf("%s: read unwanted file: %v", tname, fname)
continue
}
gotBytes, err := ioutil.ReadAll(r)
if err != nil {
t.Fatalf("%s: read file: %v", tname, err)
}
if !bytes.Equal(gotBytes, wantBytes) {
t.Errorf("%s: %q = %q but want %q", tname, fname, gotBytes, wantBytes)
}
}
}

@@ -1,37 +0,0 @@
// Copyright (c) 2015 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when either the code is running on Google App Engine or "-tags disableunsafe"
// is added to the go build command line.
// +build appengine disableunsafe
package spew
import "reflect"
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = true
)
// unsafeReflectValue typically converts the passed reflect.Value into one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
return v
}

@@ -1,23 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test

@@ -1,11 +0,0 @@
language: go
go:
- 1.6
- tip
sudo: false
before_install:
- go get github.com/axw/gocov/gocov
- go get github.com/mattn/goveralls
- if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
script:
- $HOME/gopath/bin/goveralls -service=travis-ci

@@ -1,2 +0,0 @@
# temporary symlink for testing
testing/data/symlink

@@ -1,27 +0,0 @@
language: go
sudo: required
go:
- 1.4.2
- 1.5.3
- 1.6
- tip
os:
- linux
- osx
env:
- GOARCH=amd64 DOCKER_VERSION=1.8.3
- GOARCH=386 DOCKER_VERSION=1.8.3
- GOARCH=amd64 DOCKER_VERSION=1.9.1
- GOARCH=386 DOCKER_VERSION=1.9.1
- GOARCH=amd64 DOCKER_VERSION=1.10.3
- GOARCH=386 DOCKER_VERSION=1.10.3
install:
- travis_retry travis-scripts/install.bash
script:
- travis-scripts/run-tests.bash
services:
- docker
matrix:
fast_finish: true
allow_failures:
- go: tip

@@ -1,4 +1,4 @@
Copyright (c) 2013-2016, go-dockerclient authors
Copyright (c) 2016, go-dockerclient authors
All rights reserved.
Redistribution and use in source and binary forms, with or without

@@ -34,7 +34,7 @@
//
// See http://blog.golang.org/context for example code for a server that uses
// Contexts.
package context
package context // import "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context"
import (
"errors"

0 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkall.sh generated vendored Normal file → Executable file

@@ -19,7 +19,7 @@
// These calls return err == nil to indicate success; otherwise
// err represents an operating system error describing the failure and
// holds a value of type syscall.Errno.
package unix
package unix // import "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix"
import "unsafe"

@@ -1,4 +0,0 @@
testdata/conf_out.ini
ini.sublime-project
ini.sublime-workspace
testdata/conf_reflect.ini

@@ -1,479 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build appengine
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.
package proto
import (
"math"
"reflect"
)
// A structPointer is a pointer to a struct.
type structPointer struct {
v reflect.Value
}
// toStructPointer returns a structPointer equivalent to the given reflect value.
// The reflect value must itself be a pointer to a struct.
func toStructPointer(v reflect.Value) structPointer {
return structPointer{v}
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p.v.IsNil()
}
// Interface returns the struct pointer as an interface value.
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
return p.v.Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return f.Index
}
// invalidField is an invalid field identifier.
var invalidField = field(nil)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }
// field returns the given field in the struct as a reflect value.
func structPointer_field(p structPointer, f field) reflect.Value {
// Special case: an extension map entry with a value of type T
// passes a *T to the struct-handling code with a zero field,
// expecting that it will be treated as equivalent to *struct{ X T },
// which has the same memory layout. We have to handle that case
// specially, because reflect will panic if we call FieldByIndex on a
// non-struct.
if f == nil {
return p.v.Elem()
}
return p.v.Elem().FieldByIndex(f)
}
// ifield returns the given field in the struct as an interface value.
func structPointer_ifield(p structPointer, f field) interface{} {
return structPointer_field(p, f).Addr().Interface()
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return structPointer_ifield(p, f).(*[]byte)
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return structPointer_ifield(p, f).(*[][]byte)
}
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return structPointer_ifield(p, f).(**bool)
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return structPointer_ifield(p, f).(*bool)
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return structPointer_ifield(p, f).(*[]bool)
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return structPointer_ifield(p, f).(**string)
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return structPointer_ifield(p, f).(*string)
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return structPointer_ifield(p, f).(*[]string)
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return structPointer_ifield(p, f).(*map[int32]Extension)
}
// Map returns the reflect.Value for the address of a map field in the struct.
func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {
return structPointer_field(p, f).Addr()
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
structPointer_field(p, f).Set(q.v)
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return structPointer{structPointer_field(p, f)}
}
// StructPointerSlice the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
return structPointerSlice{structPointer_field(p, f)}
}
// A structPointerSlice represents the address of a slice of pointers to structs
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
type structPointerSlice struct {
v reflect.Value
}
func (p structPointerSlice) Len() int { return p.v.Len() }
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
func (p structPointerSlice) Append(q structPointer) {
p.v.Set(reflect.Append(p.v, q.v))
}
var (
int32Type = reflect.TypeOf(int32(0))
uint32Type = reflect.TypeOf(uint32(0))
float32Type = reflect.TypeOf(float32(0))
int64Type = reflect.TypeOf(int64(0))
uint64Type = reflect.TypeOf(uint64(0))
float64Type = reflect.TypeOf(float64(0))
)
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
type word32 struct {
v reflect.Value
}
// IsNil reports whether p is nil.
func word32_IsNil(p word32) bool {
return p.v.IsNil()
}
// Set sets p to point at a newly allocated word with bits set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
t := p.v.Type().Elem()
switch t {
case int32Type:
if len(o.int32s) == 0 {
o.int32s = make([]int32, uint32PoolSize)
}
o.int32s[0] = int32(x)
p.v.Set(reflect.ValueOf(&o.int32s[0]))
o.int32s = o.int32s[1:]
return
case uint32Type:
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
}
o.uint32s[0] = x
p.v.Set(reflect.ValueOf(&o.uint32s[0]))
o.uint32s = o.uint32s[1:]
return
case float32Type:
if len(o.float32s) == 0 {
o.float32s = make([]float32, uint32PoolSize)
}
o.float32s[0] = math.Float32frombits(x)
p.v.Set(reflect.ValueOf(&o.float32s[0]))
o.float32s = o.float32s[1:]
return
}
// must be enum
p.v.Set(reflect.New(t))
p.v.Elem().SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32_Get(p word32) uint32 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32{structPointer_field(p, f)}
}
// A word32Val represents a field of type int32, uint32, float32, or enum.
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
type word32Val struct {
v reflect.Value
}
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
switch p.v.Type() {
case int32Type:
p.v.SetInt(int64(x))
return
case uint32Type:
p.v.SetUint(uint64(x))
return
case float32Type:
p.v.SetFloat(float64(math.Float32frombits(x)))
return
}
// must be enum
p.v.SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32Val_Get(p word32Val) uint32 {
elem := p.v
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val{structPointer_field(p, f)}
}
// A word32Slice is a slice of 32-bit values.
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
type word32Slice struct {
v reflect.Value
}
func (p word32Slice) Append(x uint32) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int32:
elem.SetInt(int64(int32(x)))
case reflect.Uint32:
elem.SetUint(uint64(x))
case reflect.Float32:
elem.SetFloat(float64(math.Float32frombits(x)))
}
}
func (p word32Slice) Len() int {
return p.v.Len()
}
func (p word32Slice) Index(i int) uint32 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
return word32Slice{structPointer_field(p, f)}
}
// word64 is like word32 but for 64-bit values.
type word64 struct {
v reflect.Value
}
func word64_Set(p word64, o *Buffer, x uint64) {
t := p.v.Type().Elem()
switch t {
case int64Type:
if len(o.int64s) == 0 {
o.int64s = make([]int64, uint64PoolSize)
}
o.int64s[0] = int64(x)
p.v.Set(reflect.ValueOf(&o.int64s[0]))
o.int64s = o.int64s[1:]
return
case uint64Type:
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
}
o.uint64s[0] = x
p.v.Set(reflect.ValueOf(&o.uint64s[0]))
o.uint64s = o.uint64s[1:]
return
case float64Type:
if len(o.float64s) == 0 {
o.float64s = make([]float64, uint64PoolSize)
}
o.float64s[0] = math.Float64frombits(x)
p.v.Set(reflect.ValueOf(&o.float64s[0]))
o.float64s = o.float64s[1:]
return
}
panic("unreachable")
}
func word64_IsNil(p word64) bool {
return p.v.IsNil()
}
func word64_Get(p word64) uint64 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64(p structPointer, f field) word64 {
return word64{structPointer_field(p, f)}
}
// word64Val is like word32Val but for 64-bit values.
type word64Val struct {
v reflect.Value
}
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
switch p.v.Type() {
case int64Type:
p.v.SetInt(int64(x))
return
case uint64Type:
p.v.SetUint(x)
return
case float64Type:
p.v.SetFloat(math.Float64frombits(x))
return
}
panic("unreachable")
}
func word64Val_Get(p word64Val) uint64 {
elem := p.v
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64Val(p structPointer, f field) word64Val {
return word64Val{structPointer_field(p, f)}
}
type word64Slice struct {
v reflect.Value
}
func (p word64Slice) Append(x uint64) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int64:
elem.SetInt(int64(x))
case reflect.Uint64:
elem.SetUint(uint64(x))
case reflect.Float64:
elem.SetFloat(float64(math.Float64frombits(x)))
}
}
func (p word64Slice) Len() int {
return p.v.Len()
}
func (p word64Slice) Index(i int) uint64 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return uint64(elem.Uint())
case reflect.Float64:
return math.Float64bits(float64(elem.Float()))
}
panic("unreachable")
}
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
return word64Slice{structPointer_field(p, f)}
}
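The reflect-based accessors above all reduce to one pattern: resolve a field by its index path, then take its address as an interface. A toy standalone illustration of that pattern using only the standard reflect package (toyMsg is a hypothetical stand-in for a generated proto struct):

package main

import (
	"fmt"
	"reflect"
)

type toyMsg struct {
	Count *int32
}

func main() {
	m := &toyMsg{}
	v := reflect.ValueOf(m) // plays the role of a structPointer

	// structPointer_field/ifield: FieldByIndex, then Addr().Interface().
	f := v.Elem().FieldByIndex([]int{0})
	pp := f.Addr().Interface().(**int32)

	n := int32(42)
	*pp = &n
	fmt.Println(*m.Count) // 42
}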

@@ -1,2 +0,0 @@
_*
cover*.out

@@ -1 +0,0 @@
.DS_Store

@@ -1,12 +0,0 @@
sudo: false
language: go
go:
- 1.6
branches:
only:
- master
script: make test

@@ -1,11 +0,0 @@
language: go
go:
- 1.0
- 1.1
- 1.2
- 1.3
- 1.4
script:
- go test

@@ -1,22 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

@@ -0,0 +1,399 @@
package allocdir
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
"gopkg.in/tomb.v1"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hpcloud/tail/watch"
)
var (
// The name of the directory that is shared across tasks in a task group.
SharedAllocName = "alloc"
// Name of the directory where logs of Tasks are written
LogDirName = "logs"
// The set of directories that exist inside each shared alloc directory.
SharedAllocDirs = []string{LogDirName, "tmp", "data"}
// The name of the directory that exists inside each task directory
// regardless of driver.
TaskLocal = "local"
// TaskDirs is the set of directories created in each task's directory.
TaskDirs = []string{"tmp"}
)
type AllocDir struct {
// AllocDir is the directory used for storing any state
// of this allocation. It will be purged on alloc destroy.
AllocDir string
// The shared directory is available to all tasks within the same task
// group.
SharedDir string
// TaskDirs is a mapping of task names to their non-shared directory.
TaskDirs map[string]string
}
// AllocFileInfo holds information about a file inside the AllocDir
type AllocFileInfo struct {
Name string
IsDir bool
Size int64
FileMode string
ModTime time.Time
}
// AllocDirFS exposes file operations on the alloc dir
type AllocDirFS interface {
List(path string) ([]*AllocFileInfo, error)
Stat(path string) (*AllocFileInfo, error)
ReadAt(path string, offset int64) (io.ReadCloser, error)
BlockUntilExists(path string, t *tomb.Tomb) chan error
ChangeEvents(path string, curOffset int64, t *tomb.Tomb) (*watch.FileChanges, error)
}
func NewAllocDir(allocDir string) *AllocDir {
d := &AllocDir{AllocDir: allocDir, TaskDirs: make(map[string]string)}
d.SharedDir = filepath.Join(d.AllocDir, SharedAllocName)
return d
}
// Tears down the previously built directory structure.
func (d *AllocDir) Destroy() error {
// Unmount all mounted shared alloc dirs.
var mErr multierror.Error
if err := d.UnmountAll(); err != nil {
mErr.Errors = append(mErr.Errors, err)
}
if err := os.RemoveAll(d.AllocDir); err != nil {
mErr.Errors = append(mErr.Errors, err)
}
return mErr.ErrorOrNil()
}
func (d *AllocDir) UnmountAll() error {
var mErr multierror.Error
for _, dir := range d.TaskDirs {
// Check if the directory has the shared alloc mounted.
taskAlloc := filepath.Join(dir, SharedAllocName)
if d.pathExists(taskAlloc) {
if err := d.unmountSharedDir(taskAlloc); err != nil {
mErr.Errors = append(mErr.Errors,
fmt.Errorf("failed to unmount shared alloc dir %q: %v", taskAlloc, err))
} else if err := os.RemoveAll(taskAlloc); err != nil {
mErr.Errors = append(mErr.Errors,
fmt.Errorf("failed to delete shared alloc dir %q: %v", taskAlloc, err))
}
}
// Unmount dev/ and proc/ if they have been mounted.
d.unmountSpecialDirs(dir)
}
return mErr.ErrorOrNil()
}
// Given a list of tasks, build the correct alloc structure.
func (d *AllocDir) Build(tasks []*structs.Task) error {
// Make the alloc directory, owned by the nomad process.
if err := os.MkdirAll(d.AllocDir, 0755); err != nil {
return fmt.Errorf("Failed to make the alloc directory %v: %v", d.AllocDir, err)
}
// Make the shared directory and make it available to all user/groups.
if err := os.MkdirAll(d.SharedDir, 0777); err != nil {
return err
}
// Make the shared directory have non-root permissions.
if err := d.dropDirPermissions(d.SharedDir); err != nil {
return err
}
for _, dir := range SharedAllocDirs {
p := filepath.Join(d.SharedDir, dir)
if err := os.MkdirAll(p, 0777); err != nil {
return err
}
if err := d.dropDirPermissions(p); err != nil {
return err
}
}
// Make the task directories.
for _, t := range tasks {
taskDir := filepath.Join(d.AllocDir, t.Name)
if err := os.MkdirAll(taskDir, 0777); err != nil {
return err
}
// Make the task directory have non-root permissions.
if err := d.dropDirPermissions(taskDir); err != nil {
return err
}
// Create a local directory that each task can use.
local := filepath.Join(taskDir, TaskLocal)
if err := os.MkdirAll(local, 0777); err != nil {
return err
}
if err := d.dropDirPermissions(local); err != nil {
return err
}
d.TaskDirs[t.Name] = taskDir
// Create the directories that should be in every task.
for _, dir := range TaskDirs {
local := filepath.Join(taskDir, dir)
if err := os.MkdirAll(local, 0777); err != nil {
return err
}
if err := d.dropDirPermissions(local); err != nil {
return err
}
}
}
return nil
}
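A hedged usage sketch for Build and Destroy above (the import paths assume nomad's in-tree packages, and the alloc path and task names are illustrative):

package main

import (
	"log"

	"github.com/hashicorp/nomad/client/allocdir"
	"github.com/hashicorp/nomad/nomad/structs"
)

func main() {
	d := allocdir.NewAllocDir("/tmp/nomad-alloc-example")
	tasks := []*structs.Task{{Name: "web"}, {Name: "worker"}}
	if err := d.Build(tasks); err != nil {
		log.Fatal(err)
	}
	defer d.Destroy()
	log.Println(d.TaskDirs["web"]) // /tmp/nomad-alloc-example/web
}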
// Embed takes a mapping of absolute directory or file paths on the host to
// their intended, relative location within the task directory. Embed attempts
// a hardlink first and falls back to copying. If the path exists on the host
// and can't be embedded, an error is returned.
func (d *AllocDir) Embed(task string, entries map[string]string) error {
taskdir, ok := d.TaskDirs[task]
if !ok {
return fmt.Errorf("Task directory doesn't exist for task %v", task)
}
subdirs := make(map[string]string)
for source, dest := range entries {
// Check to see if directory exists on host.
s, err := os.Stat(source)
if os.IsNotExist(err) {
continue
}
// Embedding a single file
if !s.IsDir() {
destDir := filepath.Join(taskdir, filepath.Dir(dest))
if err := os.MkdirAll(destDir, s.Mode().Perm()); err != nil {
return fmt.Errorf("Couldn't create destination directory %v: %v", destDir, err)
}
// Copy the file.
taskEntry := filepath.Join(destDir, filepath.Base(dest))
if err := d.linkOrCopy(source, taskEntry, s.Mode().Perm()); err != nil {
return err
}
continue
}
// Create destination directory.
destDir := filepath.Join(taskdir, dest)
if err := os.MkdirAll(destDir, s.Mode().Perm()); err != nil {
return fmt.Errorf("Couldn't create destination directory %v: %v", destDir, err)
}
// Enumerate the files in source.
dirEntries, err := ioutil.ReadDir(source)
if err != nil {
return fmt.Errorf("Couldn't read directory %v: %v", source, err)
}
for _, entry := range dirEntries {
hostEntry := filepath.Join(source, entry.Name())
taskEntry := filepath.Join(destDir, filepath.Base(hostEntry))
if entry.IsDir() {
subdirs[hostEntry] = filepath.Join(dest, filepath.Base(hostEntry))
continue
}
// Check if entry exists. This can happen if restarting a failed
// task.
if _, err := os.Lstat(taskEntry); err == nil {
continue
}
if !entry.Mode().IsRegular() {
// If it is a symlink we can create it, otherwise we skip it.
if entry.Mode()&os.ModeSymlink == 0 {
continue
}
link, err := os.Readlink(hostEntry)
if err != nil {
return fmt.Errorf("Couldn't resolve symlink for %v: %v", source, err)
}
if err := os.Symlink(link, taskEntry); err != nil {
// Symlinking twice
if err.(*os.LinkError).Err.Error() != "file exists" {
return fmt.Errorf("Couldn't create symlink: %v", err)
}
}
continue
}
if err := d.linkOrCopy(hostEntry, taskEntry, entry.Mode().Perm()); err != nil {
return err
}
}
}
// Recurse on self to copy subdirectories.
if len(subdirs) != 0 {
return d.Embed(task, subdirs)
}
return nil
}
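Continuing the sketch above: Embed's argument maps host paths to task-relative destinations, as in this hypothetical call (both host paths and the task name are made up).

package example

import "github.com/hashicorp/nomad/client/allocdir"

// embedDefaults hardlinks (or copies) a host binary and a config directory
// into the "web" task directory. Missing host paths are silently skipped.
func embedDefaults(d *allocdir.AllocDir) error {
	return d.Embed("web", map[string]string{
		"/bin/sh":    "bin/sh",    // single file: hardlink, else copy
		"/etc/myapp": "etc/myapp", // directory: copied recursively
	})
}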
// MountSharedDir mounts the shared directory into the specified task's
// directory. Mount is documented at an OS level in their respective
// implementation files.
func (d *AllocDir) MountSharedDir(task string) error {
taskDir, ok := d.TaskDirs[task]
if !ok {
return fmt.Errorf("No task directory exists for %v", task)
}
taskLoc := filepath.Join(taskDir, SharedAllocName)
if err := d.mountSharedDir(taskLoc); err != nil {
return fmt.Errorf("Failed to mount shared directory for task %v: %v", task, err)
}
return nil
}
// LogDir returns the log dir in the current allocation directory
func (d *AllocDir) LogDir() string {
return filepath.Join(d.AllocDir, SharedAllocName, LogDirName)
}
// List returns the list of files at a path relative to the alloc dir
func (d *AllocDir) List(path string) ([]*AllocFileInfo, error) {
p := filepath.Join(d.AllocDir, path)
finfos, err := ioutil.ReadDir(p)
if err != nil {
return []*AllocFileInfo{}, err
}
files := make([]*AllocFileInfo, len(finfos))
for idx, info := range finfos {
files[idx] = &AllocFileInfo{
Name: info.Name(),
IsDir: info.IsDir(),
Size: info.Size(),
FileMode: info.Mode().String(),
ModTime: info.ModTime(),
}
}
return files, err
}
// Stat returns information about the file at a path relative to the alloc dir
func (d *AllocDir) Stat(path string) (*AllocFileInfo, error) {
p := filepath.Join(d.AllocDir, path)
info, err := os.Stat(p)
if err != nil {
return nil, err
}
return &AllocFileInfo{
Size: info.Size(),
Name: info.Name(),
IsDir: info.IsDir(),
FileMode: info.Mode().String(),
ModTime: info.ModTime(),
}, nil
}
// ReadAt returns a reader for a file at the path relative to the alloc dir
func (d *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) {
p := filepath.Join(d.AllocDir, path)
f, err := os.Open(p)
if err != nil {
return nil, err
}
if _, err := f.Seek(offset, 0); err != nil {
return nil, fmt.Errorf("can't seek to offset %q: %v", offset, err)
}
return f, nil
}
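// Illustrative sketch (hypothetical path and offset variable): resuming a log
// read from where a previous read left off.
//
//	rc, err := d.ReadAt("web/local/output.log", lastOffset)
//	if err == nil {
//	    defer rc.Close()
//	    io.Copy(os.Stdout, rc) // stream the remainder of the file
//	}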
// BlockUntilExists blocks until the passed file relative the allocation
// directory exists. The block can be cancelled with the passed tomb.
func (d *AllocDir) BlockUntilExists(path string, t *tomb.Tomb) chan error {
// Get the path relative to the alloc directory
p := filepath.Join(d.AllocDir, path)
watcher := getFileWatcher(p)
returnCh := make(chan error, 1)
go func() {
returnCh <- watcher.BlockUntilExists(t)
close(returnCh)
}()
return returnCh
}
// ChangeEvents watches for changes to the passed path relative to the
// allocation directory. The offset should be the last read offset. The tomb is
// used to clean up the watch.
func (d *AllocDir) ChangeEvents(path string, curOffset int64, t *tomb.Tomb) (*watch.FileChanges, error) {
// Get the path relative to the alloc directory
p := filepath.Join(d.AllocDir, path)
watcher := getFileWatcher(p)
return watcher.ChangeEvents(t, curOffset)
}
// getFileWatcher returns a FileWatcher for the given path.
func getFileWatcher(path string) watch.FileWatcher {
return watch.NewPollingFileWatcher(path)
}
func fileCopy(src, dst string, perm os.FileMode) error {
// Do a simple copy, making sure both file handles are closed.
srcFile, err := os.Open(src)
if err != nil {
return fmt.Errorf("Couldn't open src file %v: %v", src, err)
}
defer srcFile.Close()
dstFile, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE, perm)
if err != nil {
return fmt.Errorf("Couldn't create destination file %v: %v", dst, err)
}
defer dstFile.Close()
if _, err := io.Copy(dstFile, srcFile); err != nil {
return fmt.Errorf("Couldn't copy %v to %v: %v", src, dst, err)
}
return nil
}
// pathExists is a helper function to check if the path exists.
func (d *AllocDir) pathExists(path string) bool {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}

View File

@ -0,0 +1,26 @@
package allocdir
import (
"syscall"
)
// Hardlinks the shared directory. As a consequence, the shared directory and
// task directory must be on the same filesystem.
func (d *AllocDir) mountSharedDir(dir string) error {
return syscall.Link(d.SharedDir, dir)
}
func (d *AllocDir) unmountSharedDir(dir string) error {
return syscall.Unlink(dir)
}
// MountSpecialDirs mounts the dev and proc file system on the chroot of the
// task. It's a no-op on darwin.
func (d *AllocDir) MountSpecialDirs(taskDir string) error {
return nil
}
// unmountSpecialDirs unmounts the dev and proc file system from the chroot
func (d *AllocDir) unmountSpecialDirs(taskDir string) error {
return nil
}

View File

@ -0,0 +1,26 @@
package allocdir
import (
"syscall"
)
// Hardlinks the shared directory. As a consequence, the shared directory and
// task directory must be on the same filesystem.
func (d *AllocDir) mountSharedDir(dir string) error {
return syscall.Link(d.SharedDir, dir)
}
func (d *AllocDir) unmountSharedDir(dir string) error {
return syscall.Unlink(dir)
}
// MountSpecialDirs mounts the dev and proc file system on the chroot of the
// task. It's a no-op on FreeBSD right now.
func (d *AllocDir) MountSpecialDirs(taskDir string) error {
return nil
}
// unmountSpecialDirs unmounts the dev and proc file system from the chroot
func (d *AllocDir) unmountSpecialDirs(taskDir string) error {
return nil
}

View File

@ -0,0 +1,79 @@
package allocdir
import (
"fmt"
"os"
"path/filepath"
"syscall"
"github.com/hashicorp/go-multierror"
)
// Bind mounts the shared directory into the task directory. Must be root to
// run.
func (d *AllocDir) mountSharedDir(taskDir string) error {
if err := os.MkdirAll(taskDir, 0777); err != nil {
return err
}
return syscall.Mount(d.SharedDir, taskDir, "", syscall.MS_BIND, "")
}
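// The bind mount above is roughly the programmatic equivalent of the shell
// command `mount --bind <SharedDir> <taskDir>`; unmountSharedDir below is the
// equivalent of `umount <dir>`.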
func (d *AllocDir) unmountSharedDir(dir string) error {
return syscall.Unmount(dir, 0)
}
// MountSpecialDirs mounts the dev and proc file system from the host to the
// chroot
func (d *AllocDir) MountSpecialDirs(taskDir string) error {
// Mount dev
dev := filepath.Join(taskDir, "dev")
if !d.pathExists(dev) {
if err := os.MkdirAll(dev, 0777); err != nil {
return fmt.Errorf("Mkdir(%v) failed: %v", dev, err)
}
if err := syscall.Mount("none", dev, "devtmpfs", syscall.MS_RDONLY, ""); err != nil {
return fmt.Errorf("Couldn't mount /dev to %v: %v", dev, err)
}
}
// Mount proc
proc := filepath.Join(taskDir, "proc")
if !d.pathExists(proc) {
if err := os.MkdirAll(proc, 0777); err != nil {
return fmt.Errorf("Mkdir(%v) failed: %v", proc, err)
}
if err := syscall.Mount("none", proc, "proc", syscall.MS_RDONLY, ""); err != nil {
return fmt.Errorf("Couldn't mount /proc to %v: %v", proc, err)
}
}
return nil
}
// unmountSpecialDirs unmounts the dev and proc file system from the chroot
func (d *AllocDir) unmountSpecialDirs(taskDir string) error {
errs := new(multierror.Error)
dev := filepath.Join(taskDir, "dev")
if d.pathExists(dev) {
if err := syscall.Unmount(dev, 0); err != nil {
errs = multierror.Append(errs, fmt.Errorf("Failed to unmount dev (%v): %v", dev, err))
} else if err := os.RemoveAll(dev); err != nil {
errs = multierror.Append(errs, fmt.Errorf("Failed to delete dev directory (%v): %v", dev, err))
}
}
// Unmount proc.
proc := filepath.Join(taskDir, "proc")
if d.pathExists(proc) {
if err := syscall.Unmount(proc, 0); err != nil {
errs = multierror.Append(errs, fmt.Errorf("Failed to unmount proc (%v): %v", proc, err))
} else if err := os.RemoveAll(proc); err != nil {
errs = multierror.Append(errs, fmt.Errorf("Failed to delete proc directory (%v): %v", dev, err))
}
}
return errs.ErrorOrNil()
}

View File

@ -0,0 +1,81 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
// Functions shared between the Unix-like platforms listed in the build tag above.
package allocdir
import (
"fmt"
"os"
"os/user"
"path/filepath"
"strconv"
"golang.org/x/sys/unix"
)
var (
// Path inside container for the mounted directory shared across tasks in a task group.
SharedAllocContainerPath = filepath.Join("/", SharedAllocName)
// Path inside container for the mounted directory for local storage.
TaskLocalContainerPath = filepath.Join("/", TaskLocal)
)
func (d *AllocDir) linkOrCopy(src, dst string, perm os.FileMode) error {
// Attempt to hardlink.
if err := os.Link(src, dst); err == nil {
return nil
}
return fileCopy(src, dst, perm)
}
func (d *AllocDir) dropDirPermissions(path string) error {
// Can't do anything if not root.
if unix.Geteuid() != 0 {
return nil
}
u, err := user.Lookup("nobody")
if err != nil {
return err
}
uid, err := getUid(u)
if err != nil {
return err
}
gid, err := getGid(u)
if err != nil {
return err
}
if err := os.Chown(path, uid, gid); err != nil {
return fmt.Errorf("Couldn't change owner/group of %v to (uid: %v, gid: %v): %v", path, uid, gid, err)
}
if err := os.Chmod(path, 0777); err != nil {
return fmt.Errorf("Chmod(%v) failed: %v", path, err)
}
return nil
}
func getUid(u *user.User) (int, error) {
uid, err := strconv.Atoi(u.Uid)
if err != nil {
return 0, fmt.Errorf("Unable to convert Uid to an int: %v", err)
}
return uid, nil
}
func getGid(u *user.User) (int, error) {
gid, err := strconv.Atoi(u.Gid)
if err != nil {
return 0, fmt.Errorf("Unable to convert Gid to an int: %v", err)
}
return gid, nil
}

View File

@ -0,0 +1,45 @@
package allocdir
import (
"errors"
"os"
"path/filepath"
)
var (
// Path inside container for the mounted directory that is shared across tasks in a task group.
SharedAllocContainerPath = filepath.Join("c:\\", SharedAllocName)
// Path inside container for the mounted directory for local storage.
TaskLocalContainerPath = filepath.Join("c:\\", TaskLocal)
)
func (d *AllocDir) linkOrCopy(src, dst string, perm os.FileMode) error {
return fileCopy(src, dst, perm)
}
// Mounting the shared directory is not supported on Windows.
func (d *AllocDir) mountSharedDir(dir string) error {
return errors.New("Mount on Windows not supported.")
}
// The windows version does nothing currently.
func (d *AllocDir) dropDirPermissions(path string) error {
return nil
}
// The windows version does nothing currently.
func (d *AllocDir) unmountSharedDir(dir string) error {
return nil
}
// MountSpecialDirs mounts the dev and proc file system on the chroot of the
// task. It's a no-op on windows.
func (d *AllocDir) MountSpecialDirs(taskDir string) error {
return nil
}
// unmountSpecialDirs unmounts the dev and proc file system from the chroot
func (d *AllocDir) unmountSpecialDirs(taskDir string) error {
return nil
}

View File

@ -0,0 +1,221 @@
package config
import (
"fmt"
"io"
"os"
"strconv"
"strings"
"time"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/nomad/structs/config"
)
var (
// DefaultEnvBlacklist is the default set of environment variables that are
// filtered when passing the environment variables of the host to a task.
DefaultEnvBlacklist = strings.Join([]string{
"CONSUL_TOKEN",
"VAULT_TOKEN",
"ATLAS_TOKEN",
"AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN",
"GOOGLE_APPLICATION_CREDENTIALS",
}, ",")
// DefaultUserBlacklist is the default set of users that tasks are not
// allowed to run as when using a driver in "user.checked_drivers".
DefaultUserBlacklist = strings.Join([]string{
"root",
"Administrator",
}, ",")
// DefaultUserCheckedDrivers is the set of drivers we apply the user
// blacklist onto. For virtualized drivers it often doesn't make sense to
// make this stipulation so by default they are ignored.
DefaultUserCheckedDrivers = strings.Join([]string{
"exec",
"qemu",
"java",
}, ",")
)
// RPCHandler can be provided to the Client if there is a local server
// to avoid going over the network. If not provided, the Client will
// maintain a connection pool to the servers.
type RPCHandler interface {
RPC(method string, args interface{}, reply interface{}) error
}
// Config is used to parameterize and configure the behavior of the client
type Config struct {
// DevMode controls if we are in a development mode which
// avoids persistent storage.
DevMode bool
// StateDir is where we store our state
StateDir string
// AllocDir is where we store data for allocations
AllocDir string
// LogOutput is the destination for logs
LogOutput io.Writer
// Region is the client's region
Region string
// Network interface to be used in network fingerprinting
NetworkInterface string
// NetworkSpeed is the default speed of network interfaces if it cannot
// be determined dynamically.
NetworkSpeed int
// MaxKillTimeout allows capping the user-specifiable KillTimeout. If the
// task's KillTimeout is greater than the MaxKillTimeout, MaxKillTimeout is
// used.
MaxKillTimeout time.Duration
// Servers is a list of known server addresses. These are as "host:port"
Servers []string
// RPCHandler can be provided to avoid network traffic if the
// server is running locally.
RPCHandler RPCHandler
// Node provides the base node
Node *structs.Node
// ClientMaxPort is the upper range of the ports that the client uses for
// communicating with plugin subsystems over loopback
ClientMaxPort uint
// ClientMinPort is the lower range of the ports that the client uses for
// communicating with plugin subsystems over loopback
ClientMinPort uint
// GloballyReservedPorts are ports that are reserved across all network
// devices and IPs.
GloballyReservedPorts []int
// A mapping of directories on the host OS to attempt to embed inside each
// task's chroot.
ChrootEnv map[string]string
// Options provides arbitrary key-value configuration for nomad internals,
// like fingerprinters and drivers. The format is:
//
// namespace.option = value
Options map[string]string
// Version is the version of the Nomad client
Version string
// Revision is the commit number of the Nomad client
Revision string
// ConsulConfig is this Agent's Consul configuration
ConsulConfig *config.ConsulConfig
// StatsCollectionInterval is the interval at which the Nomad client
// collects resource usage stats
StatsCollectionInterval time.Duration
// PublishNodeMetrics determines whether nomad is going to publish node
// level metrics to remote Telemetry sinks
PublishNodeMetrics bool
// PublishAllocationMetrics determines whether nomad is going to publish
// allocation metrics to remote Telemetry sinks
PublishAllocationMetrics bool
}
func (c *Config) Copy() *Config {
nc := new(Config)
*nc = *c
nc.Node = nc.Node.Copy()
nc.Servers = structs.CopySliceString(nc.Servers)
nc.Options = structs.CopyMapStringString(nc.Options)
return nc
}
// DefaultConfig returns the default configuration
func DefaultConfig() *Config {
return &Config{
ConsulConfig: config.DefaultConsulConfig(),
LogOutput: os.Stderr,
Region: "global",
StatsCollectionInterval: 1 * time.Second,
}
}
// Read returns the specified configuration value or "".
func (c *Config) Read(id string) string {
return c.Options[id]
}
// ReadDefault returns the specified configuration value, or the specified
// default value if none is set.
func (c *Config) ReadDefault(id string, defaultValue string) string {
val, ok := c.Options[id]
if !ok {
return defaultValue
}
return val
}
// ReadBool parses the specified option as a boolean.
func (c *Config) ReadBool(id string) (bool, error) {
val, ok := c.Options[id]
if !ok {
return false, fmt.Errorf("Specified config is missing from options")
}
bval, err := strconv.ParseBool(val)
if err != nil {
return false, fmt.Errorf("Failed to parse %s as bool: %s", val, err)
}
return bval, nil
}
// ReadBoolDefault tries to parse the specified option as a boolean. If there is
// an error in parsing, the default option is returned.
func (c *Config) ReadBoolDefault(id string, defaultValue bool) bool {
val, err := c.ReadBool(id)
if err != nil {
return defaultValue
}
return val
}
// ReadStringListToMap tries to parse the specified option as a comma separated list.
// If there is an error in parsing, an empty list is returned.
func (c *Config) ReadStringListToMap(key string) map[string]struct{} {
s := strings.TrimSpace(c.Read(key))
list := make(map[string]struct{})
if s != "" {
for _, e := range strings.Split(s, ",") {
trimmed := strings.TrimSpace(e)
list[trimmed] = struct{}{}
}
}
return list
}
// ReadStringListToMapDefault tries to parse the specified option as a comma
// separated list, falling back to the given default value if the option is unset.
func (c *Config) ReadStringListToMapDefault(key, defaultValue string) map[string]struct{} {
val, ok := c.Options[key]
if !ok {
val = defaultValue
}
list := make(map[string]struct{})
if val != "" {
for _, e := range strings.Split(val, ",") {
trimmed := strings.TrimSpace(e)
list[trimmed] = struct{}{}
}
}
return list
}
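// Illustrative sketch (hypothetical option keys and values): reading typed
// client options out of the free-form Options map.
//
//	c := DefaultConfig()
//	c.Options = map[string]string{
//	    "env.blacklist":  "CONSUL_TOKEN, VAULT_TOKEN",
//	    "fingerprint.ok": "true",
//	}
//	blacklist := c.ReadStringListToMap("env.blacklist") // {"CONSUL_TOKEN", "VAULT_TOKEN"}
//	ok := c.ReadBoolDefault("fingerprint.ok", false)    // true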

1085
vendor/github.com/hashicorp/nomad/client/driver/docker.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,14 @@
//+build !windows
package driver
import docker "github.com/fsouza/go-dockerclient"
const (
// Setting default network mode for non-windows OS as bridge
defaultNetworkMode = "bridge"
)
func getPortBinding(ip string, port string) []docker.PortBinding {
return []docker.PortBinding{docker.PortBinding{HostIP: ip, HostPort: port}}
}

View File

@ -0,0 +1,13 @@
package driver
import docker "github.com/fsouza/go-dockerclient"
const (
// Default network mode for windows containers is nat
defaultNetworkMode = "nat"
)
// Currently Windows containers don't support host IP in port binding.
func getPortBinding(ip string, port string) []docker.PortBinding {
return []docker.PortBinding{docker.PortBinding{HostIP: "", HostPort: port}}
}

View File

@ -0,0 +1,192 @@
package driver
import (
"fmt"
"log"
"path/filepath"
"sync"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/env"
"github.com/hashicorp/nomad/client/fingerprint"
"github.com/hashicorp/nomad/nomad/structs"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
cstructs "github.com/hashicorp/nomad/client/structs"
)
// BuiltinDrivers contains the built in registered drivers
// which are available for allocation handling
var BuiltinDrivers = map[string]Factory{
"docker": NewDockerDriver,
"exec": NewExecDriver,
"raw_exec": NewRawExecDriver,
"java": NewJavaDriver,
"qemu": NewQemuDriver,
"rkt": NewRktDriver,
}
// NewDriver is used to instantiate and return a new driver
// given the name and a logger
func NewDriver(name string, ctx *DriverContext) (Driver, error) {
// Lookup the factory function
factory, ok := BuiltinDrivers[name]
if !ok {
return nil, fmt.Errorf("unknown driver '%s'", name)
}
// Instantiate the driver
f := factory(ctx)
return f, nil
}
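// Minimal usage sketch (assumes a populated config, node, logger, and task
// environment; the task name "web" is hypothetical):
//
//	ctx := NewDriverContext("web", cfg, node, logger, taskEnv)
//	drv, err := NewDriver("docker", ctx)
//	if err != nil {
//	    // the driver name wasn't found in BuiltinDrivers
//	}
//	_ = drv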
// Factory is used to instantiate a new Driver
type Factory func(*DriverContext) Driver
// Driver is used for execution of tasks. This allows Nomad
// to support many pluggable implementations of task drivers.
// Examples could include LXC, Docker, Qemu, etc.
type Driver interface {
// Drivers must support the fingerprint interface for detection
fingerprint.Fingerprint
// Start is used to begin task execution
Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error)
// Open is used to re-open a handle to a task
Open(ctx *ExecContext, handleID string) (DriverHandle, error)
// Drivers must validate their configuration
Validate(map[string]interface{}) error
}
// DriverContext is a means to inject dependencies such as loggers, configs, and
// node attributes into a Driver without having to change the Driver interface
// each time we do it. Used in conjunction with Factory, above.
type DriverContext struct {
taskName string
config *config.Config
logger *log.Logger
node *structs.Node
taskEnv *env.TaskEnvironment
}
// NewEmptyDriverContext returns a DriverContext with all fields set to their
// zero value.
func NewEmptyDriverContext() *DriverContext {
return &DriverContext{
taskName: "",
config: nil,
node: nil,
logger: nil,
taskEnv: nil,
}
}
// NewDriverContext initializes a new DriverContext with the specified fields.
// This enables other packages to create DriverContexts but keeps the fields
// private to the driver. If we want to change this later we can gorename all of
// the fields in DriverContext.
func NewDriverContext(taskName string, config *config.Config, node *structs.Node,
logger *log.Logger, taskEnv *env.TaskEnvironment) *DriverContext {
return &DriverContext{
taskName: taskName,
config: config,
node: node,
logger: logger,
taskEnv: taskEnv,
}
}
// DriverHandle is an opaque handle into a driver used for task
// manipulation
type DriverHandle interface {
// Returns an opaque handle that can be used to re-open the handle
ID() string
// WaitCh returns a channel used to wait for task completion
WaitCh() chan *dstructs.WaitResult
// Update is used to update the task if possible and update task related
// configurations.
Update(task *structs.Task) error
// Kill is used to stop the task
Kill() error
// Stats returns aggregated stats of the driver
Stats() (*cstructs.TaskResourceUsage, error)
}
// ExecContext is shared between drivers within an allocation
type ExecContext struct {
sync.Mutex
// AllocDir contains information about the alloc directory structure.
AllocDir *allocdir.AllocDir
// Alloc ID
AllocID string
}
// NewExecContext is used to create a new execution context
func NewExecContext(alloc *allocdir.AllocDir, allocID string) *ExecContext {
return &ExecContext{AllocDir: alloc, AllocID: allocID}
}
// GetTaskEnv converts the alloc dir, the node, task and alloc into a
// TaskEnvironment.
func GetTaskEnv(allocDir *allocdir.AllocDir, node *structs.Node,
task *structs.Task, alloc *structs.Allocation) (*env.TaskEnvironment, error) {
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
env := env.NewTaskEnvironment(node).
SetTaskMeta(task.Meta).
SetTaskGroupMeta(tg.Meta).
SetJobMeta(alloc.Job.Meta).
SetEnvvars(task.Env).
SetTaskName(task.Name)
if allocDir != nil {
env.SetAllocDir(allocDir.SharedDir)
taskdir, ok := allocDir.TaskDirs[task.Name]
if !ok {
return nil, fmt.Errorf("failed to get task directory for task %q", task.Name)
}
env.SetTaskLocalDir(filepath.Join(taskdir, allocdir.TaskLocal))
}
if task.Resources != nil {
env.SetMemLimit(task.Resources.MemoryMB).
SetCpuLimit(task.Resources.CPU).
SetNetworks(task.Resources.Networks)
}
if alloc != nil {
env.SetAlloc(alloc)
}
return env.Build(), nil
}
func mapMergeStrInt(maps ...map[string]int) map[string]int {
out := map[string]int{}
for _, in := range maps {
for key, val := range in {
out[key] = val
}
}
return out
}
func mapMergeStrStr(maps ...map[string]string) map[string]string {
out := map[string]string{}
for _, in := range maps {
for key, val := range in {
out[key] = val
}
}
return out
}
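// For both merge helpers, later maps win on key collisions, e.g.:
//
//	mapMergeStrStr(
//	    map[string]string{"a": "1", "b": "2"},
//	    map[string]string{"b": "3"},
//	) // => map[string]string{"a": "1", "b": "3"}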

View File

@ -0,0 +1,430 @@
package env
import (
"fmt"
"os"
"strconv"
"strings"
hargs "github.com/hashicorp/nomad/helper/args"
"github.com/hashicorp/nomad/nomad/structs"
)
// A set of environment variables that are exported by each driver.
const (
// AllocDir is the environment variable with the path to the alloc directory
// that is shared across tasks within a task group.
AllocDir = "NOMAD_ALLOC_DIR"
// TaskLocalDir is the environment variable with the path to the task's local
// directory, where it can store data that persists until the alloc is
// removed.
TaskLocalDir = "NOMAD_TASK_DIR"
// MemLimit is the environment variable with the tasks memory limit in MBs.
MemLimit = "NOMAD_MEMORY_LIMIT"
// CpuLimit is the environment variable with the tasks CPU limit in MHz.
CpuLimit = "NOMAD_CPU_LIMIT"
// AllocID is the environment variable for passing the allocation ID.
AllocID = "NOMAD_ALLOC_ID"
// AllocName is the environment variable for passing the allocation name.
AllocName = "NOMAD_ALLOC_NAME"
// TaskName is the environment variable for passing the task name.
TaskName = "NOMAD_TASK_NAME"
// AllocIndex is the environment variable for passing the allocation index.
AllocIndex = "NOMAD_ALLOC_INDEX"
// AddrPrefix is the prefix for passing both dynamic and static port
// allocations to tasks.
// E.g. $NOMAD_ADDR_http=127.0.0.1:80
AddrPrefix = "NOMAD_ADDR_"
// IpPrefix is the prefix for passing the IP of a port allocation to a task.
IpPrefix = "NOMAD_IP_"
// PortPrefix is the prefix for passing the port allocation to a task.
PortPrefix = "NOMAD_PORT_"
// HostPortPrefix is the prefix for passing the host port when a portmap is
// specified.
HostPortPrefix = "NOMAD_HOST_PORT_"
// MetaPrefix is the prefix for passing task metadata.
MetaPrefix = "NOMAD_META_"
)
// The node values that can be interpreted.
const (
nodeIdKey = "node.unique.id"
nodeDcKey = "node.datacenter"
nodeNameKey = "node.unique.name"
nodeClassKey = "node.class"
// Prefixes used for lookups.
nodeAttributePrefix = "attr."
nodeMetaPrefix = "meta."
)
// TaskEnvironment is used to expose information to a task via environment
// variables and provide interpolation of Nomad variables.
type TaskEnvironment struct {
Env map[string]string
TaskMeta map[string]string
TaskGroupMeta map[string]string
JobMeta map[string]string
AllocDir string
TaskDir string
CpuLimit int
MemLimit int
TaskName string
AllocIndex int
AllocId string
AllocName string
Node *structs.Node
Networks []*structs.NetworkResource
PortMap map[string]int
// TaskEnv is the set of variables that will be set in the task's environment
TaskEnv map[string]string
// NodeValues is the set of values that are allowed for interpolation from the
// node.
NodeValues map[string]string
}
func NewTaskEnvironment(node *structs.Node) *TaskEnvironment {
return &TaskEnvironment{Node: node, AllocIndex: -1}
}
// ParseAndReplace takes the user supplied args and replaces any instance of an
// environment variable or Nomad variable in the args with the actual value.
func (t *TaskEnvironment) ParseAndReplace(args []string) []string {
replaced := make([]string, len(args))
for i, arg := range args {
replaced[i] = hargs.ReplaceEnv(arg, t.TaskEnv, t.NodeValues)
}
return replaced
}
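// Illustrative sketch (hypothetical directory; assumes the ${...}
// interpolation syntax handled by hargs.ReplaceEnv):
//
//	t := NewTaskEnvironment(node).SetTaskLocalDir("/tasks/web/local").Build()
//	args := t.ParseAndReplace([]string{"${NOMAD_TASK_DIR}/run.sh"})
//	// => []string{"/tasks/web/local/run.sh"}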
// ReplaceEnv takes an arg and replaces all occurrences of environment variables
// and nomad variables. If the variable is found in the passed map it is
// replaced, otherwise the original string is returned.
func (t *TaskEnvironment) ReplaceEnv(arg string) string {
return hargs.ReplaceEnv(arg, t.TaskEnv, t.NodeValues)
}
// Build must be called after all the tasks environment values have been set.
func (t *TaskEnvironment) Build() *TaskEnvironment {
t.NodeValues = make(map[string]string)
t.TaskEnv = make(map[string]string)
// Build the meta with the following precedence: task, task group, job.
for _, meta := range []map[string]string{t.JobMeta, t.TaskGroupMeta, t.TaskMeta} {
for k, v := range meta {
t.TaskEnv[fmt.Sprintf("%s%s", MetaPrefix, strings.ToUpper(k))] = v
}
}
// Build the ports
for _, network := range t.Networks {
for label, value := range network.MapLabelToValues(nil) {
t.TaskEnv[fmt.Sprintf("%s%s", IpPrefix, label)] = network.IP
t.TaskEnv[fmt.Sprintf("%s%s", HostPortPrefix, label)] = strconv.Itoa(value)
if forwardedPort, ok := t.PortMap[label]; ok {
value = forwardedPort
}
t.TaskEnv[fmt.Sprintf("%s%s", PortPrefix, label)] = fmt.Sprintf("%d", value)
IPPort := fmt.Sprintf("%s:%d", network.IP, value)
t.TaskEnv[fmt.Sprintf("%s%s", AddrPrefix, label)] = IPPort
}
}
// Build the directories
if t.AllocDir != "" {
t.TaskEnv[AllocDir] = t.AllocDir
}
if t.TaskDir != "" {
t.TaskEnv[TaskLocalDir] = t.TaskDir
}
// Build the resource limits
if t.MemLimit != 0 {
t.TaskEnv[MemLimit] = strconv.Itoa(t.MemLimit)
}
if t.CpuLimit != 0 {
t.TaskEnv[CpuLimit] = strconv.Itoa(t.CpuLimit)
}
// Build the tasks ids
if t.AllocId != "" {
t.TaskEnv[AllocID] = t.AllocId
}
if t.AllocName != "" {
t.TaskEnv[AllocName] = t.AllocName
}
if t.AllocIndex != -1 {
t.TaskEnv[AllocIndex] = strconv.Itoa(t.AllocIndex)
}
if t.TaskName != "" {
t.TaskEnv[TaskName] = t.TaskName
}
// Build the node
if t.Node != nil {
// Set up the node values.
t.NodeValues[nodeIdKey] = t.Node.ID
t.NodeValues[nodeDcKey] = t.Node.Datacenter
t.NodeValues[nodeNameKey] = t.Node.Name
t.NodeValues[nodeClassKey] = t.Node.NodeClass
// Set up the attributes.
for k, v := range t.Node.Attributes {
t.NodeValues[fmt.Sprintf("%s%s", nodeAttributePrefix, k)] = v
}
// Set up the meta.
for k, v := range t.Node.Meta {
t.NodeValues[fmt.Sprintf("%s%s", nodeMetaPrefix, k)] = v
}
}
// Interpret the environment variables
interpreted := make(map[string]string, len(t.Env))
for k, v := range t.Env {
interpreted[k] = hargs.ReplaceEnv(v, t.NodeValues, t.TaskEnv)
}
for k, v := range interpreted {
t.TaskEnv[k] = v
}
return t
}
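// For example (hypothetical inputs), a task meta key "owner" surfaces as
// NOMAD_META_OWNER, and a network port labeled "http" with value 8080 on IP
// 10.0.0.1 yields NOMAD_IP_http=10.0.0.1, NOMAD_HOST_PORT_http=8080,
// NOMAD_PORT_http=8080 and NOMAD_ADDR_http=10.0.0.1:8080 (a PortMap entry for
// "http" would override the latter two).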
// EnvList returns a list of strings with NAME=value pairs.
func (t *TaskEnvironment) EnvList() []string {
env := []string{}
for k, v := range t.TaskEnv {
env = append(env, fmt.Sprintf("%s=%s", k, v))
}
return env
}
// EnvMap returns a copy of the tasks environment variables.
func (t *TaskEnvironment) EnvMap() map[string]string {
m := make(map[string]string, len(t.TaskEnv))
for k, v := range t.TaskEnv {
m[k] = v
}
return m
}
// Builder methods to build the TaskEnvironment
func (t *TaskEnvironment) SetAllocDir(dir string) *TaskEnvironment {
t.AllocDir = dir
return t
}
func (t *TaskEnvironment) ClearAllocDir() *TaskEnvironment {
t.AllocDir = ""
return t
}
func (t *TaskEnvironment) SetTaskLocalDir(dir string) *TaskEnvironment {
t.TaskDir = dir
return t
}
func (t *TaskEnvironment) ClearTaskLocalDir() *TaskEnvironment {
t.TaskDir = ""
return t
}
func (t *TaskEnvironment) SetMemLimit(limit int) *TaskEnvironment {
t.MemLimit = limit
return t
}
func (t *TaskEnvironment) ClearMemLimit() *TaskEnvironment {
t.MemLimit = 0
return t
}
func (t *TaskEnvironment) SetCpuLimit(limit int) *TaskEnvironment {
t.CpuLimit = limit
return t
}
func (t *TaskEnvironment) ClearCpuLimit() *TaskEnvironment {
t.CpuLimit = 0
return t
}
func (t *TaskEnvironment) SetNetworks(networks []*structs.NetworkResource) *TaskEnvironment {
t.Networks = networks
return t
}
func (t *TaskEnvironment) clearNetworks() *TaskEnvironment {
t.Networks = nil
return t
}
func (t *TaskEnvironment) SetPortMap(portMap map[string]int) *TaskEnvironment {
t.PortMap = portMap
return t
}
func (t *TaskEnvironment) clearPortMap() *TaskEnvironment {
t.PortMap = nil
return t
}
// SetTaskMeta takes a map of meta values to be passed to the task. The keys
// are capitalized when the environment variable is set.
func (t *TaskEnvironment) SetTaskMeta(m map[string]string) *TaskEnvironment {
t.TaskMeta = m
return t
}
func (t *TaskEnvironment) ClearTaskMeta() *TaskEnvironment {
t.TaskMeta = nil
return t
}
func (t *TaskEnvironment) SetTaskGroupMeta(m map[string]string) *TaskEnvironment {
t.TaskGroupMeta = m
return t
}
func (t *TaskEnvironment) ClearTaskGroupMeta() *TaskEnvironment {
t.TaskGroupMeta = nil
return t
}
func (t *TaskEnvironment) SetJobMeta(m map[string]string) *TaskEnvironment {
t.JobMeta = m
return t
}
func (t *TaskEnvironment) ClearJobMeta() *TaskEnvironment {
t.JobMeta = nil
return t
}
func (t *TaskEnvironment) SetEnvvars(m map[string]string) *TaskEnvironment {
t.Env = m
return t
}
// AppendEnvvars appends the given environment variables.
func (t *TaskEnvironment) AppendEnvvars(m map[string]string) *TaskEnvironment {
if t.Env == nil {
t.Env = make(map[string]string, len(m))
}
for k, v := range m {
t.Env[k] = v
}
return t
}
// AppendHostEnvvars adds the host's environment variables to the task's. The
// filter parameter can be used to keep host environment variables from
// entering the task's environment.
func (t *TaskEnvironment) AppendHostEnvvars(filter []string) *TaskEnvironment {
hostEnv := os.Environ()
if t.Env == nil {
t.Env = make(map[string]string, len(hostEnv))
}
// Index the filtered environment variables.
index := make(map[string]struct{}, len(filter))
for _, f := range filter {
index[f] = struct{}{}
}
for _, e := range hostEnv {
parts := strings.SplitN(e, "=", 2)
key, value := parts[0], parts[1]
// Skip filtered environment variables
if _, filtered := index[key]; filtered {
continue
}
// Don't override the tasks environment variables.
if _, existing := t.Env[key]; !existing {
t.Env[key] = value
}
}
return t
}
func (t *TaskEnvironment) ClearEnvvars() *TaskEnvironment {
t.Env = nil
return t
}
// Helper method for setting all fields from an allocation.
func (t *TaskEnvironment) SetAlloc(alloc *structs.Allocation) *TaskEnvironment {
t.AllocId = alloc.ID
t.AllocName = alloc.Name
t.AllocIndex = alloc.Index()
return t
}
// Helper method for clearing all fields from an allocation.
func (t *TaskEnvironment) ClearAlloc(alloc *structs.Allocation) *TaskEnvironment {
return t.ClearAllocId().ClearAllocName().ClearAllocIndex()
}
func (t *TaskEnvironment) SetAllocIndex(index int) *TaskEnvironment {
t.AllocIndex = index
return t
}
func (t *TaskEnvironment) ClearAllocIndex() *TaskEnvironment {
t.AllocIndex = -1
return t
}
func (t *TaskEnvironment) SetAllocId(id string) *TaskEnvironment {
t.AllocId = id
return t
}
func (t *TaskEnvironment) ClearAllocId() *TaskEnvironment {
t.AllocId = ""
return t
}
func (t *TaskEnvironment) SetAllocName(name string) *TaskEnvironment {
t.AllocName = name
return t
}
func (t *TaskEnvironment) ClearAllocName() *TaskEnvironment {
t.AllocName = ""
return t
}
func (t *TaskEnvironment) SetTaskName(name string) *TaskEnvironment {
t.TaskName = name
return t
}
func (t *TaskEnvironment) ClearTaskName() *TaskEnvironment {
t.TaskName = ""
return t
}
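// Minimal builder-chain sketch (hypothetical limits):
//
//	env := NewTaskEnvironment(node).
//	    SetTaskName("web").
//	    SetMemLimit(256).
//	    SetCpuLimit(500).
//	    Build()
//	// env.EnvList() now includes NOMAD_TASK_NAME=web,
//	// NOMAD_MEMORY_LIMIT=256 and NOMAD_CPU_LIMIT=500.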

319
vendor/github.com/hashicorp/nomad/client/driver/exec.go generated vendored Normal file
View File

@ -0,0 +1,319 @@
package driver
import (
"encoding/json"
"fmt"
"log"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/executor"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/helper/discover"
"github.com/hashicorp/nomad/helper/fields"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/mitchellh/mapstructure"
)
const (
// The key populated in Node Attributes to indicate the presence of the Exec
// driver
execDriverAttr = "driver.exec"
)
// ExecDriver fork/execs tasks using as many of the underlying OS's isolation
// features as possible.
type ExecDriver struct {
DriverContext
}
type ExecDriverConfig struct {
Command string `mapstructure:"command"`
Args []string `mapstructure:"args"`
}
// execHandle is returned from Start/Open as a handle to the PID
type execHandle struct {
pluginClient *plugin.Client
executor executor.Executor
isolationConfig *dstructs.IsolationConfig
userPid int
allocDir *allocdir.AllocDir
killTimeout time.Duration
maxKillTimeout time.Duration
logger *log.Logger
waitCh chan *dstructs.WaitResult
doneCh chan struct{}
version string
}
// NewExecDriver is used to create a new exec driver
func NewExecDriver(ctx *DriverContext) Driver {
return &ExecDriver{DriverContext: *ctx}
}
// Validate is used to validate the driver configuration
func (d *ExecDriver) Validate(config map[string]interface{}) error {
fd := &fields.FieldData{
Raw: config,
Schema: map[string]*fields.FieldSchema{
"command": &fields.FieldSchema{
Type: fields.TypeString,
Required: true,
},
"args": &fields.FieldSchema{
Type: fields.TypeArray,
},
},
}
if err := fd.Validate(); err != nil {
return err
}
return nil
}
func (d *ExecDriver) Periodic() (bool, time.Duration) {
return true, 15 * time.Second
}
func (d *ExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
var driverConfig ExecDriverConfig
if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
return nil, err
}
// Get the command to be run
command := driverConfig.Command
if err := validateCommand(command, "args"); err != nil {
return nil, err
}
// Set the host environment variables.
filter := strings.Split(d.config.ReadDefault("env.blacklist", config.DefaultEnvBlacklist), ",")
d.taskEnv.AppendHostEnvvars(filter)
// Get the task directory for storing the executor logs.
taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
Driver: "exec",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
ChrootEnv: d.config.ChrootEnv,
Task: task,
}
ps, err := exec.LaunchCmd(&executor.ExecCommand{
Cmd: command,
Args: driverConfig.Args,
FSIsolation: true,
ResourceLimits: true,
User: getExecutorUser(task),
}, executorCtx)
if err != nil {
pluginClient.Kill()
return nil, err
}
d.logger.Printf("[DEBUG] driver.exec: started process via plugin with pid: %v", ps.Pid)
// Return a driver handle
maxKill := d.DriverContext.config.MaxKillTimeout
h := &execHandle{
pluginClient: pluginClient,
userPid: ps.Pid,
executor: exec,
allocDir: ctx.AllocDir,
isolationConfig: ps.IsolationConfig,
killTimeout: GetKillTimeout(task.KillTimeout, maxKill),
maxKillTimeout: maxKill,
logger: d.logger,
version: d.config.Version,
doneCh: make(chan struct{}),
waitCh: make(chan *dstructs.WaitResult, 1),
}
if err := exec.SyncServices(consulContext(d.config, "")); err != nil {
d.logger.Printf("[ERR] driver.exec: error registering services with consul for task: %q: %v", task.Name, err)
}
go h.run()
return h, nil
}
type execId struct {
Version string
KillTimeout time.Duration
MaxKillTimeout time.Duration
UserPid int
TaskDir string
AllocDir *allocdir.AllocDir
IsolationConfig *dstructs.IsolationConfig
PluginConfig *PluginReattachConfig
}
func (d *ExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {
id := &execId{}
if err := json.Unmarshal([]byte(handleID), id); err != nil {
return nil, fmt.Errorf("Failed to parse handle '%s': %v", handleID, err)
}
pluginConfig := &plugin.ClientConfig{
Reattach: id.PluginConfig.PluginConfig(),
}
exec, client, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
merrs := new(multierror.Error)
merrs.Errors = append(merrs.Errors, err)
d.logger.Println("[ERR] driver.exec: error connecting to plugin so destroying plugin pid and user pid")
if e := destroyPlugin(id.PluginConfig.Pid, id.UserPid); e != nil {
merrs.Errors = append(merrs.Errors, fmt.Errorf("error destroying plugin and userpid: %v", e))
}
if id.IsolationConfig != nil {
ePid := pluginConfig.Reattach.Pid
if e := executor.ClientCleanup(id.IsolationConfig, ePid); e != nil {
merrs.Errors = append(merrs.Errors, fmt.Errorf("destroying cgroup failed: %v", e))
}
}
if e := ctx.AllocDir.UnmountAll(); e != nil {
merrs.Errors = append(merrs.Errors, e)
}
return nil, fmt.Errorf("error connecting to plugin: %v", merrs.ErrorOrNil())
}
ver, _ := exec.Version()
d.logger.Printf("[DEBUG] driver.exec : version of executor: %v", ver.Version)
// Return a driver handle
h := &execHandle{
pluginClient: client,
executor: exec,
userPid: id.UserPid,
allocDir: id.AllocDir,
isolationConfig: id.IsolationConfig,
logger: d.logger,
version: id.Version,
killTimeout: id.KillTimeout,
maxKillTimeout: id.MaxKillTimeout,
doneCh: make(chan struct{}),
waitCh: make(chan *dstructs.WaitResult, 1),
}
if err := exec.SyncServices(consulContext(d.config, "")); err != nil {
d.logger.Printf("[ERR] driver.exec: error registering services with consul: %v", err)
}
go h.run()
return h, nil
}
func (h *execHandle) ID() string {
id := execId{
Version: h.version,
KillTimeout: h.killTimeout,
MaxKillTimeout: h.maxKillTimeout,
PluginConfig: NewPluginReattachConfig(h.pluginClient.ReattachConfig()),
UserPid: h.userPid,
AllocDir: h.allocDir,
IsolationConfig: h.isolationConfig,
}
data, err := json.Marshal(id)
if err != nil {
h.logger.Printf("[ERR] driver.exec: failed to marshal ID to JSON: %s", err)
}
return string(data)
}
func (h *execHandle) WaitCh() chan *dstructs.WaitResult {
return h.waitCh
}
func (h *execHandle) Update(task *structs.Task) error {
// Store the updated kill timeout.
h.killTimeout = GetKillTimeout(task.KillTimeout, h.maxKillTimeout)
h.executor.UpdateTask(task)
// Update is not possible
return nil
}
func (h *execHandle) Kill() error {
if err := h.executor.ShutDown(); err != nil {
if h.pluginClient.Exited() {
return nil
}
return fmt.Errorf("executor Shutdown failed: %v", err)
}
select {
case <-h.doneCh:
return nil
case <-time.After(h.killTimeout):
if h.pluginClient.Exited() {
return nil
}
if err := h.executor.Exit(); err != nil {
return fmt.Errorf("executor Exit failed: %v", err)
}
return nil
}
}
func (h *execHandle) Stats() (*cstructs.TaskResourceUsage, error) {
return h.executor.Stats()
}
func (h *execHandle) run() {
ps, err := h.executor.Wait()
close(h.doneCh)
// If the exit code is 0 and we got an error, the plugin didn't connect and
// doesn't know the state of the user process, so we kill the user process.
// That way, when a new executor is created on restart, the new user process
// won't collide with resources that the older user pid might still be
// holding onto.
if ps.ExitCode == 0 && err != nil {
if h.isolationConfig != nil {
ePid := h.pluginClient.ReattachConfig().Pid
if e := executor.ClientCleanup(h.isolationConfig, ePid); e != nil {
h.logger.Printf("[ERR] driver.exec: destroying resource container failed: %v", e)
}
}
if e := h.allocDir.UnmountAll(); e != nil {
h.logger.Printf("[ERR] driver.exec: unmounting dev,proc and alloc dirs failed: %v", e)
}
}
h.waitCh <- dstructs.NewWaitResult(ps.ExitCode, ps.Signal, err)
close(h.waitCh)
// Remove services
if err := h.executor.DeregisterServices(); err != nil {
h.logger.Printf("[ERR] driver.exec: failed to deregister services: %v", err)
}
if err := h.executor.Exit(); err != nil {
h.logger.Printf("[ERR] driver.exec: error destroying executor: %v", err)
}
h.pluginClient.Kill()
}

View File

@ -0,0 +1,12 @@
//+build darwin dragonfly freebsd netbsd openbsd solaris windows
package driver
import (
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/nomad/structs"
)
func (d *ExecDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
return false, nil
}

View File

@ -0,0 +1,34 @@
package driver
import (
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/nomad/structs"
"golang.org/x/sys/unix"
)
func (d *ExecDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
// Get the current status so that we can log any debug messages only if the
// state changes
_, currentlyEnabled := node.Attributes[execDriverAttr]
// Only enable if cgroups are available and we are root
if _, ok := node.Attributes["unique.cgroup.mountpoint"]; !ok {
if currentlyEnabled {
d.logger.Printf("[DEBUG] driver.exec: cgroups unavailable, disabling")
}
delete(node.Attributes, execDriverAttr)
return false, nil
} else if unix.Geteuid() != 0 {
if currentlyEnabled {
d.logger.Printf("[DEBUG] driver.exec: must run as root user, disabling")
}
delete(node.Attributes, execDriverAttr)
return false, nil
}
if !currentlyEnabled {
d.logger.Printf("[DEBUG] driver.exec: exec driver is enabled")
}
node.Attributes[execDriverAttr] = "1"
return true, nil
}

View File

@ -0,0 +1,206 @@
package executor
import (
"fmt"
"log"
"os/exec"
"sync"
"syscall"
"time"
"github.com/armon/circbuf"
docker "github.com/fsouza/go-dockerclient"
cstructs "github.com/hashicorp/nomad/client/driver/structs"
)
var (
// We store the client globally to cache the connection to the docker daemon.
createClient sync.Once
client *docker.Client
)
const (
// The default check timeout
defaultCheckTimeout = 30 * time.Second
)
// DockerScriptCheck runs nagios compatible scripts in a docker container and
// provides the check result
type DockerScriptCheck struct {
id string // id of the check
interval time.Duration // interval of the check
timeout time.Duration // timeout of the check
containerID string // container id in which the check will be invoked
logger *log.Logger
cmd string // check command
args []string // check command arguments
dockerEndpoint string // docker endpoint
tlsCert string // path to tls certificate
tlsCa string // path to tls ca
tlsKey string // path to tls key
}
// dockerClient creates the client to interact with the docker daemon
func (d *DockerScriptCheck) dockerClient() (*docker.Client, error) {
if client != nil {
return client, nil
}
var err error
createClient.Do(func() {
if d.dockerEndpoint != "" {
if d.tlsCert+d.tlsKey+d.tlsCa != "" {
d.logger.Printf("[DEBUG] executor.checks: using TLS client connection to %s", d.dockerEndpoint)
client, err = docker.NewTLSClient(d.dockerEndpoint, d.tlsCert, d.tlsKey, d.tlsCa)
} else {
d.logger.Printf("[DEBUG] executor.checks: using standard client connection to %s", d.dockerEndpoint)
client, err = docker.NewClient(d.dockerEndpoint)
}
return
}
d.logger.Println("[DEBUG] executor.checks: using client connection initialized from environment")
client, err = docker.NewClientFromEnv()
})
return client, err
}
// Run runs a script check inside a docker container
func (d *DockerScriptCheck) Run() *cstructs.CheckResult {
var (
exec *docker.Exec
err error
execRes *docker.ExecInspect
ts = time.Now()
)
if client, err = d.dockerClient(); err != nil {
return &cstructs.CheckResult{Err: err}
}
execOpts := docker.CreateExecOptions{
AttachStdin: false,
AttachStdout: true,
AttachStderr: true,
Tty: false,
Cmd: append([]string{d.cmd}, d.args...),
Container: d.containerID,
}
if exec, err = client.CreateExec(execOpts); err != nil {
return &cstructs.CheckResult{Err: err}
}
output, _ := circbuf.NewBuffer(int64(cstructs.CheckBufSize))
startOpts := docker.StartExecOptions{
Detach: false,
Tty: false,
OutputStream: output,
ErrorStream: output,
}
if err = client.StartExec(exec.ID, startOpts); err != nil {
return &cstructs.CheckResult{Err: err}
}
if execRes, err = client.InspectExec(exec.ID); err != nil {
return &cstructs.CheckResult{Err: err}
}
return &cstructs.CheckResult{
ExitCode: execRes.ExitCode,
Output: string(output.Bytes()),
Timestamp: ts,
}
}
// ID returns the check id
func (d *DockerScriptCheck) ID() string {
return d.id
}
// Interval returns the interval at which the check has to run
func (d *DockerScriptCheck) Interval() time.Duration {
return d.interval
}
// Timeout returns the duration after which a check is timed out.
func (d *DockerScriptCheck) Timeout() time.Duration {
if d.timeout == 0 {
return defaultCheckTimeout
}
return d.timeout
}
// ExecScriptCheck runs a nagios compatible script and returns the check result
type ExecScriptCheck struct {
id string // id of the script check
interval time.Duration // interval at which the check is invoked
timeout time.Duration // timeout duration of the check
cmd string // command of the check
args []string // args passed to the check
taskDir string // the root directory of the check
FSIsolation bool // indicates whether the check has to be run within a chroot
}
// Run runs an exec script check
func (e *ExecScriptCheck) Run() *cstructs.CheckResult {
buf, _ := circbuf.NewBuffer(int64(cstructs.CheckBufSize))
cmd := exec.Command(e.cmd, e.args...)
cmd.Stdout = buf
cmd.Stderr = buf
e.setChroot(cmd)
ts := time.Now()
if err := cmd.Start(); err != nil {
return &cstructs.CheckResult{Err: err}
}
errCh := make(chan error, 2)
go func() {
errCh <- cmd.Wait()
}()
for {
select {
case err := <-errCh:
endTime := time.Now()
if err == nil {
return &cstructs.CheckResult{
ExitCode: 0,
Output: string(buf.Bytes()),
Timestamp: ts,
}
}
exitCode := 1
if exitErr, ok := err.(*exec.ExitError); ok {
if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
exitCode = status.ExitStatus()
}
}
return &cstructs.CheckResult{
ExitCode: exitCode,
Output: string(buf.Bytes()),
Timestamp: ts,
Duration: endTime.Sub(ts),
}
case <-time.After(e.Timeout()):
errCh <- fmt.Errorf("timed out after waiting 30s")
}
}
return nil
}
// ID returns the check id
func (e *ExecScriptCheck) ID() string {
return e.id
}
// Interval returns the interval at which the check has to run
func (e *ExecScriptCheck) Interval() time.Duration {
return e.interval
}
// Timeout returns the duration after which a check is timed out.
func (e *ExecScriptCheck) Timeout() time.Duration {
if e.timeout == 0 {
return defaultCheckTimeout
}
return e.timeout
}

View File

@ -0,0 +1,18 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package executor
import (
"os/exec"
"syscall"
)
func (e *ExecScriptCheck) setChroot(cmd *exec.Cmd) {
if e.FSIsolation {
if cmd.SysProcAttr == nil {
cmd.SysProcAttr = &syscall.SysProcAttr{}
}
cmd.SysProcAttr.Chroot = e.taskDir
}
cmd.Dir = "/"
}

View File

@ -0,0 +1,8 @@
// +build windows
package executor
import "os/exec"
func (e *ExecScriptCheck) setChroot(cmd *exec.Cmd) {
}

View File

@ -0,0 +1,856 @@
package executor
import (
"fmt"
"io/ioutil"
"log"
"net"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/hashicorp/go-multierror"
"github.com/mitchellh/go-ps"
"github.com/shirou/gopsutil/process"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/driver/env"
"github.com/hashicorp/nomad/client/driver/logging"
"github.com/hashicorp/nomad/client/stats"
"github.com/hashicorp/nomad/command/agent/consul"
shelpers "github.com/hashicorp/nomad/helper/stats"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/nomad/structs/config"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
cstructs "github.com/hashicorp/nomad/client/structs"
)
const (
// pidScanInterval is the interval at which the executor scans the process
// tree for finding out the pids that the executor and its child processes
// have forked
pidScanInterval = 5 * time.Second
)
var (
// The statistics the basic executor exposes
ExecutorBasicMeasuredMemStats = []string{"RSS", "Swap"}
ExecutorBasicMeasuredCpuStats = []string{"System Mode", "User Mode", "Percent"}
)
// Executor is the interface which allows a driver to launch and supervise
// a process
type Executor interface {
LaunchCmd(command *ExecCommand, ctx *ExecutorContext) (*ProcessState, error)
LaunchSyslogServer(ctx *ExecutorContext) (*SyslogServerState, error)
Wait() (*ProcessState, error)
ShutDown() error
Exit() error
UpdateLogConfig(logConfig *structs.LogConfig) error
UpdateTask(task *structs.Task) error
SyncServices(ctx *ConsulContext) error
DeregisterServices() error
Version() (*ExecutorVersion, error)
Stats() (*cstructs.TaskResourceUsage, error)
}
// ConsulContext holds context to configure the Consul client and run checks
type ConsulContext struct {
// ConsulConfig contains the configuration information for talking
// with this Nomad Agent's Consul Agent.
ConsulConfig *config.ConsulConfig
// ContainerID is the ID of the container
ContainerID string
// TLSCert is the cert which the docker client uses while interacting with the docker
// daemon over TLS
TLSCert string
// TLSCa is the CA which the docker client uses while interacting with the docker
// daemon over TLS
TLSCa string
// TLSKey is the TLS key which the docker client uses while interacting with
// the docker daemon
TLSKey string
// DockerEndpoint is the endpoint of the docker daemon
DockerEndpoint string
}
// ExecutorContext holds context to configure the command user
// wants to run and isolate it
type ExecutorContext struct {
// TaskEnv holds information about the environment of a Task
TaskEnv *env.TaskEnvironment
// AllocDir is the handle to do operations on the alloc dir of
// the task
AllocDir *allocdir.AllocDir
// Task is the task whose executor is being launched
Task *structs.Task
// AllocID is the allocation id to which the task belongs
AllocID string
// A mapping of directories on the host OS to attempt to embed inside each
// task's chroot.
ChrootEnv map[string]string
// Driver is the name of the driver that invoked the executor
Driver string
// PortUpperBound is the upper bound of the ports that we can use to start
// the syslog server
PortUpperBound uint
// PortLowerBound is the lower bound of the ports that we can use to start
// the syslog server
PortLowerBound uint
}
// ExecCommand holds the user command, args, and other isolation related
// settings.
type ExecCommand struct {
// Cmd is the command that the user wants to run.
Cmd string
// Args is the args of the command that the user wants to run.
Args []string
// FSIsolation determines whether the command would be run in a chroot.
FSIsolation bool
// User is the user which the executor uses to run the command.
User string
// ResourceLimits determines whether resource limits are enforced by the
// executor.
ResourceLimits bool
}
// ProcessState holds information about the state of a user process.
type ProcessState struct {
Pid int
ExitCode int
Signal int
IsolationConfig *dstructs.IsolationConfig
Time time.Time
}
// nomadPid holds a pid and its cpu percentage calculators
type nomadPid struct {
pid int
cpuStatsTotal *stats.CpuStats
cpuStatsUser *stats.CpuStats
cpuStatsSys *stats.CpuStats
}
// SyslogServerState holds the address and isolation information of a launched
// syslog server
type SyslogServerState struct {
IsolationConfig *dstructs.IsolationConfig
Addr string
}
// ExecutorVersion is the version of the executor
type ExecutorVersion struct {
Version string
}
func (v *ExecutorVersion) GoString() string {
return v.Version
}
// UniversalExecutor is an implementation of the Executor which launches and
// supervises processes. In addition to process supervision it provides resource
// and file system isolation
type UniversalExecutor struct {
cmd exec.Cmd
ctx *ExecutorContext
command *ExecCommand
pids map[int]*nomadPid
pidLock sync.RWMutex
taskDir string
exitState *ProcessState
processExited chan interface{}
fsIsolationEnforced bool
lre *logging.FileRotator
lro *logging.FileRotator
rotatorLock sync.Mutex
shutdownCh chan struct{}
syslogServer *logging.SyslogServer
syslogChan chan *logging.SyslogMessage
resConCtx resourceContainerContext
consulSyncer *consul.Syncer
consulCtx *ConsulContext
totalCpuStats *stats.CpuStats
userCpuStats *stats.CpuStats
systemCpuStats *stats.CpuStats
logger *log.Logger
}
// NewExecutor returns an Executor
func NewExecutor(logger *log.Logger) Executor {
if err := shelpers.Init(); err != nil {
logger.Printf("[FATAL] executor: unable to initialize stats: %v", err)
return nil
}
exec := &UniversalExecutor{
logger: logger,
processExited: make(chan interface{}),
totalCpuStats: stats.NewCpuStats(),
userCpuStats: stats.NewCpuStats(),
systemCpuStats: stats.NewCpuStats(),
pids: make(map[int]*nomadPid),
}
return exec
}
// Version returns the api version of the executor
func (e *UniversalExecutor) Version() (*ExecutorVersion, error) {
return &ExecutorVersion{Version: "1.0.0"}, nil
}
// LaunchCmd launches a process and returns its state. It also configures and
// applies isolation on certain platforms.
func (e *UniversalExecutor) LaunchCmd(command *ExecCommand, ctx *ExecutorContext) (*ProcessState, error) {
e.logger.Printf("[DEBUG] executor: launching command %v %v", command.Cmd, strings.Join(command.Args, " "))
e.ctx = ctx
e.command = command
// setting the user of the process
if command.User != "" {
e.logger.Printf("[DEBUG] executor: running command as %s", command.User)
if err := e.runAs(command.User); err != nil {
return nil, err
}
}
// configuring the task dir
if err := e.configureTaskDir(); err != nil {
return nil, err
}
e.ctx.TaskEnv.Build()
// configuring the chroot, resource container, and start the plugin
// process in the chroot.
if err := e.configureIsolation(); err != nil {
return nil, err
}
// Apply ourselves into the resource container. The executor MUST be in
// the resource container before the user task is started, otherwise we
// are subject to a fork attack in which a process escapes isolation by
// immediately forking.
if err := e.applyLimits(os.Getpid()); err != nil {
return nil, err
}
// Setup the loggers
if err := e.configureLoggers(); err != nil {
return nil, err
}
e.cmd.Stdout = e.lro
e.cmd.Stderr = e.lre
// Look up the binary path and make it executable
absPath, err := e.lookupBin(ctx.TaskEnv.ReplaceEnv(command.Cmd))
if err != nil {
return nil, err
}
if err := e.makeExecutable(absPath); err != nil {
return nil, err
}
path := absPath
// Determine the path to run as it may have to be relative to the chroot.
if e.fsIsolationEnforced {
rel, err := filepath.Rel(e.taskDir, path)
if err != nil {
return nil, err
}
path = rel
}
// Set the commands arguments
e.cmd.Path = path
e.cmd.Args = append([]string{e.cmd.Path}, ctx.TaskEnv.ParseAndReplace(command.Args)...)
e.cmd.Env = ctx.TaskEnv.EnvList()
// Start the process
if err := e.cmd.Start(); err != nil {
return nil, err
}
go e.collectPids()
go e.wait()
ic := e.resConCtx.getIsolationConfig()
return &ProcessState{Pid: e.cmd.Process.Pid, ExitCode: -1, IsolationConfig: ic, Time: time.Now()}, nil
}
// configureLoggers sets up the standard out/error file rotators
func (e *UniversalExecutor) configureLoggers() error {
e.rotatorLock.Lock()
defer e.rotatorLock.Unlock()
logFileSize := int64(e.ctx.Task.LogConfig.MaxFileSizeMB * 1024 * 1024)
if e.lro == nil {
lro, err := logging.NewFileRotator(e.ctx.AllocDir.LogDir(), fmt.Sprintf("%v.stdout", e.ctx.Task.Name),
e.ctx.Task.LogConfig.MaxFiles, logFileSize, e.logger)
if err != nil {
return err
}
e.lro = lro
}
if e.lre == nil {
lre, err := logging.NewFileRotator(e.ctx.AllocDir.LogDir(), fmt.Sprintf("%v.stderr", e.ctx.Task.Name),
e.ctx.Task.LogConfig.MaxFiles, logFileSize, e.logger)
if err != nil {
return err
}
e.lre = lre
}
return nil
}
// Wait waits until a process has exited and returns its exit code and errors
func (e *UniversalExecutor) Wait() (*ProcessState, error) {
<-e.processExited
return e.exitState, nil
}
// COMPAT: prior to Nomad 0.3.2, UpdateTask didn't exist.
// UpdateLogConfig updates the log configuration
func (e *UniversalExecutor) UpdateLogConfig(logConfig *structs.LogConfig) error {
e.ctx.Task.LogConfig = logConfig
if e.lro == nil {
return fmt.Errorf("log rotator for stdout doesn't exist")
}
e.lro.MaxFiles = logConfig.MaxFiles
e.lro.FileSize = int64(logConfig.MaxFileSizeMB * 1024 * 1024)
if e.lre == nil {
return fmt.Errorf("log rotator for stderr doesn't exist")
}
e.lre.MaxFiles = logConfig.MaxFiles
e.lre.FileSize = int64(logConfig.MaxFileSizeMB * 1024 * 1024)
return nil
}
func (e *UniversalExecutor) UpdateTask(task *structs.Task) error {
e.ctx.Task = task
// Updating Log Config
fileSize := int64(task.LogConfig.MaxFileSizeMB * 1024 * 1024)
e.lro.MaxFiles = task.LogConfig.MaxFiles
e.lro.FileSize = fileSize
e.lre.MaxFiles = task.LogConfig.MaxFiles
e.lre.FileSize = fileSize
// Re-syncing task with Consul agent
if e.consulSyncer != nil {
e.interpolateServices(e.ctx.Task)
domain := consul.NewExecutorDomain(e.ctx.AllocID, task.Name)
serviceMap := generateServiceKeys(e.ctx.AllocID, task.Services)
e.consulSyncer.SetServices(domain, serviceMap)
}
return nil
}
// generateServiceKeys takes a list of interpolated Nomad Services and returns a map
// of ServiceKeys to Nomad Services.
func generateServiceKeys(allocID string, services []*structs.Service) map[consul.ServiceKey]*structs.Service {
keys := make(map[consul.ServiceKey]*structs.Service, len(services))
for _, service := range services {
key := consul.GenerateServiceKey(service)
keys[key] = service
}
return keys
}
func (e *UniversalExecutor) wait() {
defer close(e.processExited)
err := e.cmd.Wait()
ic := e.resConCtx.getIsolationConfig()
if err == nil {
e.exitState = &ProcessState{Pid: 0, ExitCode: 0, IsolationConfig: ic, Time: time.Now()}
return
}
exitCode := 1
var signal int
if exitErr, ok := err.(*exec.ExitError); ok {
if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
exitCode = status.ExitStatus()
if status.Signaled() {
// bash(1) uses the lower 7 bits of a uint8
// to indicate normal program failure (see
// <sysexits.h>). If a process terminates due
// to a signal, encode the signal number to
// indicate which signal caused the process
// to terminate. Mirror this exit code
// encoding scheme.
const exitSignalBase = 128
signal = int(status.Signal())
exitCode = exitSignalBase + signal
}
}
} else {
e.logger.Printf("[DEBUG] executor: unexpected Wait() error type: %v", err)
}
e.exitState = &ProcessState{Pid: 0, ExitCode: exitCode, Signal: signal, IsolationConfig: ic, Time: time.Now()}
}
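// Illustrative sketch (not part of the vendored source): under the encoding
// above, an exit code greater than 128 maps back to the terminating signal.
// For example, SIGKILL (9) yields 128+9 = 137 and SIGTERM (15) yields
// 128+15 = 143. The helper name below is hypothetical.
func decodeExitCode(code int) (signal int, wasSignaled bool) {
const exitSignalBase = 128
if code > exitSignalBase {
return code - exitSignalBase, true
}
return 0, false
}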
var (
// finishedErr is the error message received when trying to kill an already
// exited process.
finishedErr = "os: process already finished"
)
// ClientCleanup is the cleanup routine that a Nomad Client uses to remove the
// remnants of a child UniversalExecutor.
func ClientCleanup(ic *dstructs.IsolationConfig, pid int) error {
return clientCleanup(ic, pid)
}
// Exit cleans up the alloc directory, destroys the resource container and
// kills the user process
func (e *UniversalExecutor) Exit() error {
var merr multierror.Error
if e.syslogServer != nil {
e.syslogServer.Shutdown()
}
e.lre.Close()
e.lro.Close()
if e.consulSyncer != nil {
e.consulSyncer.Shutdown()
}
// If the executor did not launch a process, return.
if e.command == nil {
return nil
}
// Prefer killing the process via the resource container.
if e.cmd.Process != nil && !e.command.ResourceLimits {
proc, err := os.FindProcess(e.cmd.Process.Pid)
if err != nil {
e.logger.Printf("[ERR] executor: can't find process with pid: %v, err: %v",
e.cmd.Process.Pid, err)
} else if err := proc.Kill(); err != nil && err.Error() != finishedErr {
merr.Errors = append(merr.Errors,
fmt.Errorf("can't kill process with pid: %v, err: %v", e.cmd.Process.Pid, err))
}
}
if e.command.ResourceLimits {
if err := e.resConCtx.executorCleanup(); err != nil {
merr.Errors = append(merr.Errors, err)
}
}
if e.command.FSIsolation {
if err := e.removeChrootMounts(); err != nil {
merr.Errors = append(merr.Errors, err)
}
}
return merr.ErrorOrNil()
}
// ShutDown sends an interrupt signal to the user process
func (e *UniversalExecutor) ShutDown() error {
if e.cmd.Process == nil {
return fmt.Errorf("executor.shutdown error: no process found")
}
proc, err := os.FindProcess(e.cmd.Process.Pid)
if err != nil {
return fmt.Errorf("executor.shutdown failed to find process: %v", err)
}
if runtime.GOOS == "windows" {
if err := proc.Kill(); err != nil && err.Error() != finishedErr {
return err
}
return nil
}
if err = proc.Signal(os.Interrupt); err != nil && err.Error() != finishedErr {
return fmt.Errorf("executor.shutdown error: %v", err)
}
return nil
}
// SyncServices syncs the services of the task that the executor is running with
// Consul
func (e *UniversalExecutor) SyncServices(ctx *ConsulContext) error {
e.logger.Printf("[INFO] executor: registering services")
e.consulCtx = ctx
if e.consulSyncer == nil {
cs, err := consul.NewSyncer(ctx.ConsulConfig, e.shutdownCh, e.logger)
if err != nil {
return err
}
e.consulSyncer = cs
go e.consulSyncer.Run()
}
e.interpolateServices(e.ctx.Task)
e.consulSyncer.SetDelegatedChecks(e.createCheckMap(), e.createCheck)
e.consulSyncer.SetAddrFinder(e.ctx.Task.FindHostAndPortFor)
domain := consul.NewExecutorDomain(e.ctx.AllocID, e.ctx.Task.Name)
serviceMap := generateServiceKeys(e.ctx.AllocID, e.ctx.Task.Services)
e.consulSyncer.SetServices(domain, serviceMap)
return nil
}
// DeregisterServices removes the services of the task that the executor is
// running from Consul
func (e *UniversalExecutor) DeregisterServices() error {
e.logger.Printf("[INFO] executor: de-registering services and shutting down consul service")
if e.consulSyncer != nil {
return e.consulSyncer.Shutdown()
}
return nil
}
// pidStats returns the resource usage stats per pid
func (e *UniversalExecutor) pidStats() (map[string]*cstructs.ResourceUsage, error) {
stats := make(map[string]*cstructs.ResourceUsage)
e.pidLock.RLock()
pids := make(map[int]*nomadPid, len(e.pids))
for k, v := range e.pids {
pids[k] = v
}
e.pidLock.RUnlock()
for pid, np := range pids {
p, err := process.NewProcess(int32(pid))
if err != nil {
e.logger.Printf("[DEBUG] executor: unable to create new process with pid: %v", pid)
continue
}
ms := &cstructs.MemoryStats{}
if memInfo, err := p.MemoryInfo(); err == nil {
ms.RSS = memInfo.RSS
ms.Swap = memInfo.Swap
ms.Measured = ExecutorBasicMeasuredMemStats
}
cs := &cstructs.CpuStats{}
if cpuStats, err := p.Times(); err == nil {
cs.SystemMode = np.cpuStatsSys.Percent(cpuStats.System * float64(time.Second))
cs.UserMode = np.cpuStatsUser.Percent(cpuStats.User * float64(time.Second))
cs.Measured = ExecutorBasicMeasuredCpuStats
// calculate cpu usage percent
cs.Percent = np.cpuStatsTotal.Percent(cpuStats.Total() * float64(time.Second))
}
stats[strconv.Itoa(pid)] = &cstructs.ResourceUsage{MemoryStats: ms, CpuStats: cs}
}
return stats, nil
}
// configureTaskDir sets the task dir in the executor
func (e *UniversalExecutor) configureTaskDir() error {
taskDir, ok := e.ctx.AllocDir.TaskDirs[e.ctx.Task.Name]
if !ok {
return fmt.Errorf("couldn't find task directory for task %v", e.ctx.Task.Name)
}
e.taskDir = taskDir
e.cmd.Dir = taskDir
return nil
}
// lookupBin looks for the binary to run in the following locations, in order:
// task/local/, task/, and finally the host $PATH. The returned path is
// absolute.
func (e *UniversalExecutor) lookupBin(bin string) (string, error) {
// Check in the local directory
local := filepath.Join(e.taskDir, allocdir.TaskLocal, bin)
if _, err := os.Stat(local); err == nil {
return local, nil
}
// Check at the root of the task's directory
root := filepath.Join(e.taskDir, bin)
if _, err := os.Stat(root); err == nil {
return root, nil
}
// Check the $PATH
if host, err := exec.LookPath(bin); err == nil {
return host, nil
}
return "", fmt.Errorf("binary %q could not be found", bin)
}
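// Example (illustrative; the binary name and task dir below are hypothetical):
// for a task directory /alloc/mytask, looking up "server" checks
// /alloc/mytask/local/server, then /alloc/mytask/server, and finally the
// host's $PATH:
//
//	absPath, err := e.lookupBin("server")
//	if err != nil {
//		// "server" was neither staged in the task dir nor found on $PATH
//	}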
// makeExecutable makes the given file executable for root, group and others.
func (e *UniversalExecutor) makeExecutable(binPath string) error {
if runtime.GOOS == "windows" {
return nil
}
fi, err := os.Stat(binPath)
if err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("binary %q does not exist", binPath)
}
return fmt.Errorf("specified binary is invalid: %v", err)
}
// If it is not executable, make it so.
perm := fi.Mode().Perm()
req := os.FileMode(0555)
if perm&req != req {
if err := os.Chmod(binPath, perm|req); err != nil {
return fmt.Errorf("error making %q executable: %s", binPath, err)
}
}
return nil
}
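// Worked example (illustrative): a binary staged with mode 0640 fails the
// check because 0640&0555 = 0440 != 0555, so it is chmod'ed to
// 0640|0555 = 0755; a binary already at 0755 satisfies the mask and is left
// untouched.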
// getListener returns a listener for the plugin to serve on: a TCP listener
// bound to a free port between the lower and upper bounds on Windows, and a
// Unix domain socket elsewhere
func (e *UniversalExecutor) getListener(lowerBound uint, upperBound uint) (net.Listener, error) {
if runtime.GOOS == "windows" {
return e.listenerTCP(lowerBound, upperBound)
}
return e.listenerUnix()
}
// listenerTCP creates a TCP listener using an unused port between an upper and
// lower bound
func (e *UniversalExecutor) listenerTCP(lowerBound uint, upperBound uint) (net.Listener, error) {
for i := lowerBound; i <= upperBound; i++ {
addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("localhost:%v", i))
if err != nil {
return nil, err
}
l, err := net.ListenTCP("tcp", addr)
if err != nil {
continue
}
return l, nil
}
return nil, fmt.Errorf("No free port found")
}
// listenerUnix creates a Unix domain socket
func (e *UniversalExecutor) listenerUnix() (net.Listener, error) {
f, err := ioutil.TempFile("", "plugin")
if err != nil {
return nil, err
}
path := f.Name()
if err := f.Close(); err != nil {
return nil, err
}
if err := os.Remove(path); err != nil {
return nil, err
}
return net.Listen("unix", path)
}
// createCheckMap creates a map of checks that the executor will handle on its
// own
func (e *UniversalExecutor) createCheckMap() map[string]struct{} {
checks := map[string]struct{}{
"script": struct{}{},
}
return checks
}
// createCheck creates NomadCheck from a ServiceCheck
func (e *UniversalExecutor) createCheck(check *structs.ServiceCheck, checkID string) (consul.Check, error) {
if check.Type == structs.ServiceCheckScript && e.ctx.Driver == "docker" {
return &DockerScriptCheck{
id: checkID,
interval: check.Interval,
timeout: check.Timeout,
containerID: e.consulCtx.ContainerID,
logger: e.logger,
cmd: check.Command,
args: check.Args,
}, nil
}
if check.Type == structs.ServiceCheckScript && (e.ctx.Driver == "exec" ||
e.ctx.Driver == "raw_exec" || e.ctx.Driver == "java") {
return &ExecScriptCheck{
id: checkID,
interval: check.Interval,
timeout: check.Timeout,
cmd: check.Command,
args: check.Args,
taskDir: e.taskDir,
FSIsolation: e.command.FSIsolation,
}, nil
}
return nil, fmt.Errorf("couldn't create check for %v", check.Name)
}
// interpolateServices interpolates tags in a task's services and checks with
// values from the task's environment.
func (e *UniversalExecutor) interpolateServices(task *structs.Task) {
e.ctx.TaskEnv.Build()
for _, service := range task.Services {
for _, check := range service.Checks {
if check.Type == structs.ServiceCheckScript {
check.Name = e.ctx.TaskEnv.ReplaceEnv(check.Name)
check.Command = e.ctx.TaskEnv.ReplaceEnv(check.Command)
check.Args = e.ctx.TaskEnv.ParseAndReplace(check.Args)
check.Path = e.ctx.TaskEnv.ReplaceEnv(check.Path)
check.Protocol = e.ctx.TaskEnv.ReplaceEnv(check.Protocol)
}
}
service.Name = e.ctx.TaskEnv.ReplaceEnv(service.Name)
service.Tags = e.ctx.TaskEnv.ParseAndReplace(service.Tags)
}
}
// collectPids collects the pids of the child processes that the executor is
// running every 5 seconds
func (e *UniversalExecutor) collectPids() {
// Fire the timer right away when the executor starts; from then on the pids
// are collected every scan interval
timer := time.NewTimer(0)
defer timer.Stop()
for {
select {
case <-timer.C:
pids, err := e.getAllPids()
if err != nil {
e.logger.Printf("[DEBUG] executor: error collecting pids: %v", err)
}
e.pidLock.Lock()
// Adding pids which are not being tracked
for pid, np := range pids {
if _, ok := e.pids[pid]; !ok {
e.pids[pid] = np
}
}
// Removing pids which are no longer present
for pid := range e.pids {
if _, ok := pids[pid]; !ok {
delete(e.pids, pid)
}
}
e.pidLock.Unlock()
timer.Reset(pidScanInterval)
case <-e.processExited:
return
}
}
}
// scanPids scans all the pids on the machine running the current executor and
// returns the child processes of the executor.
func (e *UniversalExecutor) scanPids(parentPid int, allPids []ps.Process) (map[int]*nomadPid, error) {
processFamily := make(map[int]struct{})
processFamily[parentPid] = struct{}{}
for {
// flag to indicate if we have found a match
foundNewPid := false
// A buffer for holding pids which haven't matched with any parent pid
var pidsRemaining []ps.Process
for _, pid := range allPids {
// checking if the pid is a child of any of the parents
if _, isChild := processFamily[pid.PPid()]; isChild {
processFamily[pid.Pid()] = struct{}{}
foundNewPid = true
} else {
// if it is not, then we add the pid to the buffer
pidsRemaining = append(pidsRemaining, pid)
}
}
// scan only the pids which are left in the buffer
allPids = pidsRemaining
// not scanning anymore if we couldn't find a single match
if !foundNewPid {
break
}
}
res := make(map[int]*nomadPid)
for pid := range processFamily {
np := nomadPid{
pid: pid,
cpuStatsTotal: stats.NewCpuStats(),
cpuStatsUser: stats.NewCpuStats(),
cpuStatsSys: stats.NewCpuStats(),
}
res[pid] = &np
}
return res, nil
}
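// Illustrative trace (hypothetical pids): with executor pid 100 and a process
// table where 101's parent is 100, 102's parent is 101, and 200's parent is 1,
// the first pass adds 101, the second pass adds 102, and the third pass finds
// no new children, so the loop stops with the family {100, 101, 102}; 200 is
// never included.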
// aggregatedResourceUsage aggregates the resource usage of all the pids and
// returns a TaskResourceUsage data point
func (e *UniversalExecutor) aggregatedResourceUsage(pidStats map[string]*cstructs.ResourceUsage) *cstructs.TaskResourceUsage {
ts := time.Now().UTC().UnixNano()
var (
systemModeCPU, userModeCPU, percent float64
totalRSS, totalSwap uint64
)
for _, pidStat := range pidStats {
systemModeCPU += pidStat.CpuStats.SystemMode
userModeCPU += pidStat.CpuStats.UserMode
percent += pidStat.CpuStats.Percent
totalRSS += pidStat.MemoryStats.RSS
totalSwap += pidStat.MemoryStats.Swap
}
totalCPU := &cstructs.CpuStats{
SystemMode: systemModeCPU,
UserMode: userModeCPU,
Percent: percent,
Measured: ExecutorBasicMeasuredCpuStats,
TotalTicks: e.systemCpuStats.TicksConsumed(percent),
}
totalMemory := &cstructs.MemoryStats{
RSS: totalRSS,
Swap: totalSwap,
Measured: ExecutorBasicMeasuredMemStats,
}
resourceUsage := cstructs.ResourceUsage{
MemoryStats: totalMemory,
CpuStats: totalCPU,
}
return &cstructs.TaskResourceUsage{
ResourceUsage: &resourceUsage,
Timestamp: ts,
Pids: pidStats,
}
}

View File

@ -0,0 +1,46 @@
// +build darwin dragonfly freebsd netbsd openbsd solaris windows

package executor
import (
"os"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/mitchellh/go-ps"
)
func (e *UniversalExecutor) configureChroot() error {
return nil
}
func (e *UniversalExecutor) removeChrootMounts() error {
return nil
}
func (e *UniversalExecutor) runAs(userid string) error {
return nil
}
func (e *UniversalExecutor) applyLimits(pid int) error {
return nil
}
func (e *UniversalExecutor) configureIsolation() error {
return nil
}
func (e *UniversalExecutor) Stats() (*cstructs.TaskResourceUsage, error) {
pidStats, err := e.pidStats()
if err != nil {
return nil, err
}
return e.aggregatedResourceUsage(pidStats), nil
}
func (e *UniversalExecutor) getAllPids() (map[int]*nomadPid, error) {
allProcesses, err := ps.Processes()
if err != nil {
return nil, err
}
return e.scanPids(os.Getpid(), allProcesses)
}

View File

@ -0,0 +1,373 @@
package executor
import (
"fmt"
"os"
"os/user"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/hashicorp/go-multierror"
"github.com/mitchellh/go-ps"
"github.com/opencontainers/runc/libcontainer/cgroups"
cgroupFs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
cgroupConfig "github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/stats"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/nomad/structs"
)
var (
// A mapping of directories on the host OS to attempt to embed inside each
// task's chroot.
chrootEnv = map[string]string{
"/bin": "/bin",
"/etc": "/etc",
"/lib": "/lib",
"/lib32": "/lib32",
"/lib64": "/lib64",
"/run/resolvconf": "/run/resolvconf",
"/sbin": "/sbin",
"/usr": "/usr",
}
// clockTicks is the clocks per second of the machine
clockTicks = uint64(system.GetClockTicks())
// The statistics the executor exposes when using cgroups
ExecutorCgroupMeasuredMemStats = []string{"RSS", "Cache", "Swap", "Max Usage", "Kernel Usage", "Kernel Max Usage"}
ExecutorCgroupMeasuredCpuStats = []string{"System Mode", "User Mode", "Throttled Periods", "Throttled Time", "Percent"}
)
// configureIsolation configures chroot and creates cgroups
func (e *UniversalExecutor) configureIsolation() error {
if e.command.FSIsolation {
if err := e.configureChroot(); err != nil {
return err
}
}
if e.command.ResourceLimits {
if err := e.configureCgroups(e.ctx.Task.Resources); err != nil {
return fmt.Errorf("error creating cgroups: %v", err)
}
}
return nil
}
// applyLimits puts a process in a pre-configured cgroup
func (e *UniversalExecutor) applyLimits(pid int) error {
if !e.command.ResourceLimits {
return nil
}
// Entering the process in the cgroup
manager := getCgroupManager(e.resConCtx.groups, nil)
if err := manager.Apply(pid); err != nil {
e.logger.Printf("[ERR] executor: error applying pid to cgroup: %v", err)
if er := e.removeChrootMounts(); er != nil {
e.logger.Printf("[ERR] executor: error removing chroot: %v", er)
}
return err
}
e.resConCtx.cgPaths = manager.GetPaths()
cgConfig := cgroupConfig.Config{Cgroups: e.resConCtx.groups}
if err := manager.Set(&cgConfig); err != nil {
e.logger.Printf("[ERR] executor: error setting cgroup config: %v", err)
if er := DestroyCgroup(e.resConCtx.groups, e.resConCtx.cgPaths, os.Getpid()); er != nil {
e.logger.Printf("[ERR] executor: error destroying cgroup: %v", er)
}
if er := e.removeChrootMounts(); er != nil {
e.logger.Printf("[ERR] executor: error removing chroot: %v", er)
}
return err
}
return nil
}
// configureCgroups converts a Nomad Resources specification into the equivalent
// cgroup configuration. It returns an error if the resources are invalid.
func (e *UniversalExecutor) configureCgroups(resources *structs.Resources) error {
e.resConCtx.groups = &cgroupConfig.Cgroup{}
e.resConCtx.groups.Resources = &cgroupConfig.Resources{}
cgroupName := structs.GenerateUUID()
e.resConCtx.groups.Path = filepath.Join("/nomad", cgroupName)
// TODO: verify this is needed for things like network access
e.resConCtx.groups.Resources.AllowAllDevices = true
if resources.MemoryMB > 0 {
// Total amount of memory allowed to consume
e.resConCtx.groups.Resources.Memory = int64(resources.MemoryMB * 1024 * 1024)
// Disable swap to avoid issues on the machine
e.resConCtx.groups.Resources.MemorySwap = int64(-1)
}
if resources.CPU < 2 {
return fmt.Errorf("resources.CPU must be equal to or greater than 2: %v", resources.CPU)
}
// Set the relative CPU shares for this cgroup.
e.resConCtx.groups.Resources.CpuShares = int64(resources.CPU)
if resources.IOPS != 0 {
// Validate it is in an acceptable range.
if resources.IOPS < 10 || resources.IOPS > 1000 {
return fmt.Errorf("resources.IOPS must be between 10 and 1000: %d", resources.IOPS)
}
e.resConCtx.groups.Resources.BlkioWeight = uint16(resources.IOPS)
}
return nil
}
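// Worked example (illustrative numbers): a task with
// Resources{MemoryMB: 256, CPU: 500, IOPS: 100} yields
// Memory = 256*1024*1024 = 268435456 bytes, MemorySwap = -1 (swap disabled),
// CpuShares = 500 and BlkioWeight = 100 in the resulting cgroup config.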
// Stats reports the resource utilization of the cgroup. If there is no resource
// isolation we aggregate the resource utilization of all the pids launched by
// the executor.
func (e *UniversalExecutor) Stats() (*cstructs.TaskResourceUsage, error) {
if !e.command.ResourceLimits {
pidStats, err := e.pidStats()
if err != nil {
return nil, err
}
return e.aggregatedResourceUsage(pidStats), nil
}
ts := time.Now()
manager := getCgroupManager(e.resConCtx.groups, e.resConCtx.cgPaths)
stats, err := manager.GetStats()
if err != nil {
return nil, err
}
// Memory Related Stats
swap := stats.MemoryStats.SwapUsage
maxUsage := stats.MemoryStats.Usage.MaxUsage
rss := stats.MemoryStats.Stats["rss"]
cache := stats.MemoryStats.Stats["cache"]
ms := &cstructs.MemoryStats{
RSS: rss,
Cache: cache,
Swap: swap.Usage,
MaxUsage: maxUsage,
KernelUsage: stats.MemoryStats.KernelUsage.Usage,
KernelMaxUsage: stats.MemoryStats.KernelUsage.MaxUsage,
Measured: ExecutorCgroupMeasuredMemStats,
}
// CPU Related Stats
totalProcessCPUUsage := float64(stats.CpuStats.CpuUsage.TotalUsage)
userModeTime := float64(stats.CpuStats.CpuUsage.UsageInUsermode)
kernelModeTime := float64(stats.CpuStats.CpuUsage.UsageInKernelmode)
totalPercent := e.totalCpuStats.Percent(totalProcessCPUUsage)
cs := &cstructs.CpuStats{
SystemMode: e.systemCpuStats.Percent(kernelModeTime),
UserMode: e.userCpuStats.Percent(userModeTime),
Percent: totalPercent,
ThrottledPeriods: stats.CpuStats.ThrottlingData.ThrottledPeriods,
ThrottledTime: stats.CpuStats.ThrottlingData.ThrottledTime,
TotalTicks: e.systemCpuStats.TicksConsumed(totalPercent),
Measured: ExecutorCgroupMeasuredCpuStats,
}
taskResUsage := cstructs.TaskResourceUsage{
ResourceUsage: &cstructs.ResourceUsage{
MemoryStats: ms,
CpuStats: cs,
},
Timestamp: ts.UTC().UnixNano(),
}
if pidStats, err := e.pidStats(); err == nil {
taskResUsage.Pids = pidStats
}
return &taskResUsage, nil
}
// runAs takes a username, looks up the user, and sets the command to execute
// as that user.
func (e *UniversalExecutor) runAs(userid string) error {
u, err := user.Lookup(userid)
if err != nil {
return fmt.Errorf("Failed to identify user %v: %v", userid, err)
}
// Convert the uid and gid
uid, err := strconv.ParseUint(u.Uid, 10, 32)
if err != nil {
return fmt.Errorf("Unable to convert userid to uint32: %s", err)
}
gid, err := strconv.ParseUint(u.Gid, 10, 32)
if err != nil {
return fmt.Errorf("Unable to convert groupid to uint32: %s", err)
}
// Set the command to run as that user and group.
if e.cmd.SysProcAttr == nil {
e.cmd.SysProcAttr = &syscall.SysProcAttr{}
}
if e.cmd.SysProcAttr.Credential == nil {
e.cmd.SysProcAttr.Credential = &syscall.Credential{}
}
e.cmd.SysProcAttr.Credential.Uid = uint32(uid)
e.cmd.SysProcAttr.Credential.Gid = uint32(gid)
return nil
}
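// Usage note (illustrative): user.Lookup resolves a user *name*, so a task
// configured with user = "nobody" commonly maps to uid/gid 65534 on Linux;
// purely numeric ids would need user.LookupId, which this executor does not
// call.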
// configureChroot configures a chroot
func (e *UniversalExecutor) configureChroot() error {
allocDir := e.ctx.AllocDir
if err := allocDir.MountSharedDir(e.ctx.Task.Name); err != nil {
return err
}
chroot := chrootEnv
if len(e.ctx.ChrootEnv) > 0 {
chroot = e.ctx.ChrootEnv
}
if err := allocDir.Embed(e.ctx.Task.Name, chroot); err != nil {
return err
}
// Set the task's AllocDir environment variable.
e.ctx.TaskEnv.
SetAllocDir(filepath.Join("/", allocdir.SharedAllocName)).
SetTaskLocalDir(filepath.Join("/", allocdir.TaskLocal)).
Build()
if e.cmd.SysProcAttr == nil {
e.cmd.SysProcAttr = &syscall.SysProcAttr{}
}
e.cmd.SysProcAttr.Chroot = e.taskDir
e.cmd.Dir = "/"
if err := allocDir.MountSpecialDirs(e.taskDir); err != nil {
return err
}
e.fsIsolationEnforced = true
return nil
}
// removeChrootMounts is an idempotent operation to clean the task directory's
// mounts and should be called when tearing down the task.
func (e *UniversalExecutor) removeChrootMounts() error {
// Prevent a race between Wait/ForceStop
e.resConCtx.cgLock.Lock()
defer e.resConCtx.cgLock.Unlock()
return e.ctx.AllocDir.UnmountAll()
}
// getAllPids returns the pids of all the processes spun up by the executor. We
// use the libcontainer APIs to get the pids when the user is using cgroup
// isolation, and we scan the entire process table if the user is not using any
// isolation.
func (e *UniversalExecutor) getAllPids() (map[int]*nomadPid, error) {
if e.command.ResourceLimits {
manager := getCgroupManager(e.resConCtx.groups, e.resConCtx.cgPaths)
pids, err := manager.GetAllPids()
if err != nil {
return nil, err
}
np := make(map[int]*nomadPid, len(pids))
for _, pid := range pids {
np[pid] = &nomadPid{
pid: pid,
cpuStatsTotal: stats.NewCpuStats(),
cpuStatsSys: stats.NewCpuStats(),
cpuStatsUser: stats.NewCpuStats(),
}
}
return np, nil
}
allProcesses, err := ps.Processes()
if err != nil {
return nil, err
}
return e.scanPids(os.Getpid(), allProcesses)
}
// DestroyCgroup kills all processes in the cgroup and removes the cgroup
// configuration from the host. This function is idempotent.
func DestroyCgroup(groups *cgroupConfig.Cgroup, cgPaths map[string]string, executorPid int) error {
mErrs := new(multierror.Error)
if groups == nil {
return fmt.Errorf("Can't destroy: cgroup configuration empty")
}
// Move the executor into the global cgroup so that the task specific
// cgroup can be destroyed.
nilGroup := &cgroupConfig.Cgroup{}
nilGroup.Path = "/"
nilGroup.Resources = groups.Resources
nilManager := getCgroupManager(nilGroup, nil)
err := nilManager.Apply(executorPid)
if err != nil && !strings.Contains(err.Error(), "no such process") {
return fmt.Errorf("failed to remove executor pid %d: %v", executorPid, err)
}
// Freeze the Cgroup so that it cannot continue to fork/exec.
manager := getCgroupManager(groups, cgPaths)
err = manager.Freeze(cgroupConfig.Frozen)
if err != nil && !strings.Contains(err.Error(), "no such file or directory") {
return fmt.Errorf("failed to freeze cgroup: %v", err)
}
var procs []*os.Process
pids, err := manager.GetAllPids()
if err != nil {
multierror.Append(mErrs, fmt.Errorf("error getting pids: %v", err))
// Unfreeze the cgroup.
err = manager.Freeze(cgroupConfig.Thawed)
if err != nil && !strings.Contains(err.Error(), "no such file or directory") {
multierror.Append(mErrs, fmt.Errorf("failed to unfreeze cgroup: %v", err))
}
return mErrs.ErrorOrNil()
}
// Kill the processes in the cgroup
for _, pid := range pids {
proc, err := os.FindProcess(pid)
if err != nil {
multierror.Append(mErrs, fmt.Errorf("error finding process %v: %v", pid, err))
continue
}
procs = append(procs, proc)
if e := proc.Kill(); e != nil {
multierror.Append(mErrs, fmt.Errorf("error killing process %v: %v", pid, e))
}
}
// Unfreeze the cgroup so we can wait.
err = manager.Freeze(cgroupConfig.Thawed)
if err != nil && !strings.Contains(err.Error(), "no such file or directory") {
multierror.Append(mErrs, fmt.Errorf("failed to unfreeze cgroup: %v", err))
}
// Wait on the killed processes to ensure they are cleaned up.
for _, proc := range procs {
// Don't capture the error because we expect this to fail for
// processes we didn't fork.
proc.Wait()
}
// Remove the cgroup.
if err := manager.Destroy(); err != nil {
multierror.Append(mErrs, fmt.Errorf("failed to delete the cgroup directories: %v", err))
}
return mErrs.ErrorOrNil()
}
// getCgroupManager returns the correct libcontainer cgroup manager.
func getCgroupManager(groups *cgroupConfig.Cgroup, paths map[string]string) cgroups.Manager {
return &cgroupFs.Manager{Cgroups: groups, Paths: paths}
}

View File

@ -0,0 +1,50 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris

package executor
import (
"fmt"
"io"
"log/syslog"
"github.com/hashicorp/nomad/client/driver/logging"
)
func (e *UniversalExecutor) LaunchSyslogServer(ctx *ExecutorContext) (*SyslogServerState, error) {
e.ctx = ctx
// configuring the task dir
if err := e.configureTaskDir(); err != nil {
return nil, err
}
e.syslogChan = make(chan *logging.SyslogMessage, 2048)
l, err := e.getListener(e.ctx.PortLowerBound, e.ctx.PortUpperBound)
if err != nil {
return nil, err
}
e.logger.Printf("[DEBUG] sylog-server: launching syslog server on addr: %v", l.Addr().String())
if err := e.configureLoggers(); err != nil {
return nil, err
}
e.syslogServer = logging.NewSyslogServer(l, e.syslogChan, e.logger)
go e.syslogServer.Start()
go e.collectLogs(e.lre, e.lro)
syslogAddr := fmt.Sprintf("%s://%s", l.Addr().Network(), l.Addr().String())
return &SyslogServerState{Addr: syslogAddr}, nil
}
func (e *UniversalExecutor) collectLogs(we io.Writer, wo io.Writer) {
for logParts := range e.syslogChan {
// If the severity of the log line is err then we write to stderr
// otherwise all messages go to stdout
if logParts.Severity == syslog.LOG_ERR {
e.lre.Write(logParts.Message)
e.lre.Write([]byte{'\n'})
} else {
e.lro.Write(logParts.Message)
e.lro.Write([]byte{'\n'})
}
}
}

View File

@ -0,0 +1,5 @@
package executor
func (e *UniversalExecutor) LaunchSyslogServer(ctx *ExecutorContext) (*SyslogServerState, error) {
return nil, nil
}

View File

@ -0,0 +1,24 @@
// +build darwin dragonfly freebsd netbsd openbsd solaris windows

package executor
import (
dstructs "github.com/hashicorp/nomad/client/driver/structs"
)
// resourceContainerContext is a platform-specific struct for managing a
// resource container.
type resourceContainerContext struct {
}
func clientCleanup(ic *dstructs.IsolationConfig, pid int) error {
return nil
}
func (rc *resourceContainerContext) executorCleanup() error {
return nil
}
func (rc *resourceContainerContext) getIsolationConfig() *dstructs.IsolationConfig {
return nil
}

View File

@ -0,0 +1,42 @@
package executor
import (
"os"
"sync"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
cgroupConfig "github.com/opencontainers/runc/libcontainer/configs"
)
// resourceContainerContext is a platform-specific struct for managing a
// resource container. In the case of Linux, this is used to control Cgroups.
type resourceContainerContext struct {
groups *cgroupConfig.Cgroup
cgPaths map[string]string
cgLock sync.Mutex
}
// clientCleanup removes this host's Cgroup from the Nomad Client's context
func clientCleanup(ic *dstructs.IsolationConfig, pid int) error {
if err := DestroyCgroup(ic.Cgroup, ic.CgroupPaths, pid); err != nil {
return err
}
return nil
}
// cleanup removes this host's Cgroup from within an Executor's context
func (rc *resourceContainerContext) executorCleanup() error {
rc.cgLock.Lock()
defer rc.cgLock.Unlock()
if err := DestroyCgroup(rc.groups, rc.cgPaths, os.Getpid()); err != nil {
return err
}
return nil
}
func (rc *resourceContainerContext) getIsolationConfig() *dstructs.IsolationConfig {
return &dstructs.IsolationConfig{
Cgroup: rc.groups,
CgroupPaths: rc.cgPaths,
}
}

View File

@ -0,0 +1,181 @@
package driver
import (
"encoding/gob"
"log"
"net/rpc"
"github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/driver/executor"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/nomad/structs"
)
// Registering these types since we have to serialize and de-serialize the Task
// structs over the wire between drivers and the executor.
func init() {
gob.Register([]interface{}{})
gob.Register(map[string]interface{}{})
gob.Register([]map[string]string{})
gob.Register([]map[string]int{})
}
type ExecutorRPC struct {
client *rpc.Client
logger *log.Logger
}
// LaunchCmdArgs wraps a user command and the args for the purposes of RPC
type LaunchCmdArgs struct {
Cmd *executor.ExecCommand
Ctx *executor.ExecutorContext
}
// LaunchSyslogServerArgs wraps the executor context for the purposes of RPC
type LaunchSyslogServerArgs struct {
Ctx *executor.ExecutorContext
}
// SyncServicesArgs wraps the consul context for the purposes of RPC
type SyncServicesArgs struct {
Ctx *executor.ConsulContext
}
func (e *ExecutorRPC) LaunchCmd(cmd *executor.ExecCommand, ctx *executor.ExecutorContext) (*executor.ProcessState, error) {
var ps *executor.ProcessState
err := e.client.Call("Plugin.LaunchCmd", LaunchCmdArgs{Cmd: cmd, Ctx: ctx}, &ps)
return ps, err
}
func (e *ExecutorRPC) LaunchSyslogServer(ctx *executor.ExecutorContext) (*executor.SyslogServerState, error) {
var ss *executor.SyslogServerState
err := e.client.Call("Plugin.LaunchSyslogServer", LaunchSyslogServerArgs{Ctx: ctx}, &ss)
return ss, err
}
func (e *ExecutorRPC) Wait() (*executor.ProcessState, error) {
var ps executor.ProcessState
err := e.client.Call("Plugin.Wait", new(interface{}), &ps)
return &ps, err
}
func (e *ExecutorRPC) ShutDown() error {
return e.client.Call("Plugin.ShutDown", new(interface{}), new(interface{}))
}
func (e *ExecutorRPC) Exit() error {
return e.client.Call("Plugin.Exit", new(interface{}), new(interface{}))
}
func (e *ExecutorRPC) UpdateLogConfig(logConfig *structs.LogConfig) error {
return e.client.Call("Plugin.UpdateLogConfig", logConfig, new(interface{}))
}
func (e *ExecutorRPC) UpdateTask(task *structs.Task) error {
return e.client.Call("Plugin.UpdateTask", task, new(interface{}))
}
func (e *ExecutorRPC) SyncServices(ctx *executor.ConsulContext) error {
return e.client.Call("Plugin.SyncServices", SyncServicesArgs{Ctx: ctx}, new(interface{}))
}
func (e *ExecutorRPC) DeregisterServices() error {
return e.client.Call("Plugin.DeregisterServices", new(interface{}), new(interface{}))
}
func (e *ExecutorRPC) Version() (*executor.ExecutorVersion, error) {
var version executor.ExecutorVersion
err := e.client.Call("Plugin.Version", new(interface{}), &version)
return &version, err
}
func (e *ExecutorRPC) Stats() (*cstructs.TaskResourceUsage, error) {
var resourceUsage cstructs.TaskResourceUsage
err := e.client.Call("Plugin.Stats", new(interface{}), &resourceUsage)
return &resourceUsage, err
}
type ExecutorRPCServer struct {
Impl executor.Executor
logger *log.Logger
}
func (e *ExecutorRPCServer) LaunchCmd(args LaunchCmdArgs, ps *executor.ProcessState) error {
state, err := e.Impl.LaunchCmd(args.Cmd, args.Ctx)
if state != nil {
*ps = *state
}
return err
}
func (e *ExecutorRPCServer) LaunchSyslogServer(args LaunchSyslogServerArgs, ss *executor.SyslogServerState) error {
state, err := e.Impl.LaunchSyslogServer(args.Ctx)
if state != nil {
*ss = *state
}
return err
}
func (e *ExecutorRPCServer) Wait(args interface{}, ps *executor.ProcessState) error {
state, err := e.Impl.Wait()
if state != nil {
*ps = *state
}
return err
}
func (e *ExecutorRPCServer) ShutDown(args interface{}, resp *interface{}) error {
return e.Impl.ShutDown()
}
func (e *ExecutorRPCServer) Exit(args interface{}, resp *interface{}) error {
return e.Impl.Exit()
}
func (e *ExecutorRPCServer) UpdateLogConfig(args *structs.LogConfig, resp *interface{}) error {
return e.Impl.UpdateLogConfig(args)
}
func (e *ExecutorRPCServer) UpdateTask(args *structs.Task, resp *interface{}) error {
return e.Impl.UpdateTask(args)
}
func (e *ExecutorRPCServer) SyncServices(args SyncServicesArgs, resp *interface{}) error {
return e.Impl.SyncServices(args.Ctx)
}
func (e *ExecutorRPCServer) DeregisterServices(args interface{}, resp *interface{}) error {
return e.Impl.DeregisterServices()
}
func (e *ExecutorRPCServer) Version(args interface{}, version *executor.ExecutorVersion) error {
ver, err := e.Impl.Version()
if ver != nil {
*version = *ver
}
return err
}
func (e *ExecutorRPCServer) Stats(args interface{}, resourceUsage *cstructs.TaskResourceUsage) error {
ru, err := e.Impl.Stats()
if ru != nil {
*resourceUsage = *ru
}
return err
}
type ExecutorPlugin struct {
logger *log.Logger
Impl *ExecutorRPCServer
}
func (p *ExecutorPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
if p.Impl == nil {
p.Impl = &ExecutorRPCServer{Impl: executor.NewExecutor(p.logger), logger: p.logger}
}
return p.Impl, nil
}
func (p *ExecutorPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
return &ExecutorRPC{client: c, logger: p.logger}, nil
}
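// Minimal sketch (the handshake values and map key below are assumptions for
// illustration, not taken from this file): a plugin binary would typically
// expose the executor over RPC by serving an ExecutorPlugin, after which the
// host process obtains an ExecutorRPC via go-plugin's client:
//
//	plugin.Serve(&plugin.ServeConfig{
//		HandshakeConfig: plugin.HandshakeConfig{
//			ProtocolVersion:  1,
//			MagicCookieKey:   "EXAMPLE_PLUGIN_COOKIE",
//			MagicCookieValue: "example",
//		},
//		Plugins: map[string]plugin.Plugin{
//			"executor": &ExecutorPlugin{},
//		},
//	})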

416
vendor/github.com/hashicorp/nomad/client/driver/java.go generated vendored Normal file
View File

@ -0,0 +1,416 @@
package driver
import (
"bytes"
"encoding/json"
"fmt"
"log"
"os/exec"
"path/filepath"
"runtime"
"strings"
"syscall"
"time"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-plugin"
"github.com/mitchellh/mapstructure"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/executor"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
"github.com/hashicorp/nomad/client/fingerprint"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/helper/discover"
"github.com/hashicorp/nomad/helper/fields"
"github.com/hashicorp/nomad/nomad/structs"
)
const (
// The key populated in Node Attributes to indicate presence of the Java
// driver
javaDriverAttr = "driver.java"
)
// JavaDriver is a simple driver to execute applications packaged in Jars.
// It literally just fork/execs tasks with the java command.
type JavaDriver struct {
DriverContext
fingerprint.StaticFingerprinter
}
type JavaDriverConfig struct {
JarPath string `mapstructure:"jar_path"`
JvmOpts []string `mapstructure:"jvm_options"`
Args []string `mapstructure:"args"`
}
// javaHandle is returned from Start/Open as a handle to the PID
type javaHandle struct {
pluginClient *plugin.Client
userPid int
executor executor.Executor
isolationConfig *dstructs.IsolationConfig
taskDir string
allocDir *allocdir.AllocDir
killTimeout time.Duration
maxKillTimeout time.Duration
version string
logger *log.Logger
waitCh chan *dstructs.WaitResult
doneCh chan struct{}
}
// NewJavaDriver is used to create a new java driver
func NewJavaDriver(ctx *DriverContext) Driver {
return &JavaDriver{DriverContext: *ctx}
}
// Validate is used to validate the driver configuration
func (d *JavaDriver) Validate(config map[string]interface{}) error {
fd := &fields.FieldData{
Raw: config,
Schema: map[string]*fields.FieldSchema{
"jar_path": &fields.FieldSchema{
Type: fields.TypeString,
Required: true,
},
"jvm_options": &fields.FieldSchema{
Type: fields.TypeArray,
},
"args": &fields.FieldSchema{
Type: fields.TypeArray,
},
},
}
if err := fd.Validate(); err != nil {
return err
}
return nil
}
func (d *JavaDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
// Get the current status so that we can log any debug messages only if the
// state changes
_, currentlyEnabled := node.Attributes[javaDriverAttr]
// Only enable if we are root and cgroups are mounted when running on linux systems.
if runtime.GOOS == "linux" && (syscall.Geteuid() != 0 || !d.cgroupsMounted(node)) {
if currentlyEnabled {
d.logger.Printf("[DEBUG] driver.java: root priviledges and mounted cgroups required on linux, disabling")
}
delete(node.Attributes, "driver.java")
return false, nil
}
// Find java version
var out bytes.Buffer
var erOut bytes.Buffer
cmd := exec.Command("java", "-version")
cmd.Stdout = &out
cmd.Stderr = &erOut
err := cmd.Run()
if err != nil {
// assume Java wasn't found
delete(node.Attributes, javaDriverAttr)
return false, nil
}
// 'java -version' returns output on Stderr typically.
// Check stdout, but it's probably empty
var infoString string
if out.String() != "" {
infoString = out.String()
}
if erOut.String() != "" {
infoString = erOut.String()
}
if infoString == "" {
if currentlyEnabled {
d.logger.Println("[WARN] driver.java: error parsing Java version information, aborting")
}
delete(node.Attributes, javaDriverAttr)
return false, nil
}
// Assume 'java -version' returns 3 lines:
// java version "1.6.0_36"
// OpenJDK Runtime Environment (IcedTea6 1.13.8) (6b36-1.13.8-0ubuntu1~12.04)
// OpenJDK 64-Bit Server VM (build 23.25-b01, mixed mode)
// Each line is terminated by \n
info := strings.Split(infoString, "\n")
versionString := info[0]
versionString = strings.TrimPrefix(versionString, "java version ")
versionString = strings.Trim(versionString, "\"")
node.Attributes[javaDriverAttr] = "1"
node.Attributes["driver.java.version"] = versionString
node.Attributes["driver.java.runtime"] = info[1]
node.Attributes["driver.java.vm"] = info[2]
return true, nil
}
func (d *JavaDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
var driverConfig JavaDriverConfig
if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
return nil, err
}
// Set the host environment variables.
filter := strings.Split(d.config.ReadDefault("env.blacklist", config.DefaultEnvBlacklist), ",")
d.taskEnv.AppendHostEnvvars(filter)
taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
if driverConfig.JarPath == "" {
return nil, fmt.Errorf("jar_path must be specified")
}
args := []string{}
// Look for jvm options
if len(driverConfig.JvmOpts) != 0 {
d.logger.Printf("[DEBUG] driver.java: found JVM options: %s", driverConfig.JvmOpts)
args = append(args, driverConfig.JvmOpts...)
}
// Build the argument list.
args = append(args, "-jar", driverConfig.JarPath)
if len(driverConfig.Args) != 0 {
args = append(args, driverConfig.Args...)
}
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
execIntf, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
Driver: "java",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
ChrootEnv: d.config.ChrootEnv,
Task: task,
}
absPath, err := GetAbsolutePath("java")
if err != nil {
return nil, err
}
ps, err := execIntf.LaunchCmd(&executor.ExecCommand{
Cmd: absPath,
Args: args,
FSIsolation: true,
ResourceLimits: true,
User: getExecutorUser(task),
}, executorCtx)
if err != nil {
pluginClient.Kill()
return nil, err
}
d.logger.Printf("[DEBUG] driver.java: started process with pid: %v", ps.Pid)
// Return a driver handle
maxKill := d.DriverContext.config.MaxKillTimeout
h := &javaHandle{
pluginClient: pluginClient,
executor: execIntf,
userPid: ps.Pid,
isolationConfig: ps.IsolationConfig,
taskDir: taskDir,
allocDir: ctx.AllocDir,
killTimeout: GetKillTimeout(task.KillTimeout, maxKill),
maxKillTimeout: maxKill,
version: d.config.Version,
logger: d.logger,
doneCh: make(chan struct{}),
waitCh: make(chan *dstructs.WaitResult, 1),
}
if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
d.logger.Printf("[ERR] driver.java: error registering services with consul for task: %q: %v", task.Name, err)
}
go h.run()
return h, nil
}
// cgroupsMounted returns true if the cgroups are mounted on a system otherwise
// returns false
func (d *JavaDriver) cgroupsMounted(node *structs.Node) bool {
_, ok := node.Attributes["unique.cgroup.mountpoint"]
return ok
}
type javaId struct {
Version string
KillTimeout time.Duration
MaxKillTimeout time.Duration
PluginConfig *PluginReattachConfig
IsolationConfig *dstructs.IsolationConfig
TaskDir string
AllocDir *allocdir.AllocDir
UserPid int
}
func (d *JavaDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {
id := &javaId{}
if err := json.Unmarshal([]byte(handleID), id); err != nil {
return nil, fmt.Errorf("Failed to parse handle '%s': %v", handleID, err)
}
pluginConfig := &plugin.ClientConfig{
Reattach: id.PluginConfig.PluginConfig(),
}
exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
merrs := new(multierror.Error)
merrs.Errors = append(merrs.Errors, err)
d.logger.Println("[ERR] driver.java: error connecting to plugin so destroying plugin pid and user pid")
if e := destroyPlugin(id.PluginConfig.Pid, id.UserPid); e != nil {
merrs.Errors = append(merrs.Errors, fmt.Errorf("error destroying plugin and userpid: %v", e))
}
if id.IsolationConfig != nil {
ePid := pluginConfig.Reattach.Pid
if e := executor.ClientCleanup(id.IsolationConfig, ePid); e != nil {
merrs.Errors = append(merrs.Errors, fmt.Errorf("destroying resource container failed: %v", e))
}
}
if e := ctx.AllocDir.UnmountAll(); e != nil {
merrs.Errors = append(merrs.Errors, e)
}
return nil, fmt.Errorf("error connecting to plugin: %v", merrs.ErrorOrNil())
}
ver, _ := exec.Version()
d.logger.Printf("[DEBUG] driver.java: version of executor: %v", ver.Version)
// Return a driver handle
h := &javaHandle{
pluginClient: pluginClient,
executor: exec,
userPid: id.UserPid,
isolationConfig: id.IsolationConfig,
taskDir: id.TaskDir,
allocDir: id.AllocDir,
logger: d.logger,
version: id.Version,
killTimeout: id.KillTimeout,
maxKillTimeout: id.MaxKillTimeout,
doneCh: make(chan struct{}),
waitCh: make(chan *dstructs.WaitResult, 1),
}
if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
d.logger.Printf("[ERR] driver.java: error registering services with consul: %v", err)
}
go h.run()
return h, nil
}
func (h *javaHandle) ID() string {
id := javaId{
Version: h.version,
KillTimeout: h.killTimeout,
MaxKillTimeout: h.maxKillTimeout,
PluginConfig: NewPluginReattachConfig(h.pluginClient.ReattachConfig()),
UserPid: h.userPid,
TaskDir: h.taskDir,
AllocDir: h.allocDir,
IsolationConfig: h.isolationConfig,
}
data, err := json.Marshal(id)
if err != nil {
h.logger.Printf("[ERR] driver.java: failed to marshal ID to JSON: %s", err)
}
return string(data)
}
func (h *javaHandle) WaitCh() chan *dstructs.WaitResult {
return h.waitCh
}
func (h *javaHandle) Update(task *structs.Task) error {
// Store the updated kill timeout.
h.killTimeout = GetKillTimeout(task.KillTimeout, h.maxKillTimeout)
h.executor.UpdateTask(task)
// Update is not possible
return nil
}
func (h *javaHandle) Kill() error {
if err := h.executor.ShutDown(); err != nil {
if h.pluginClient.Exited() {
return nil
}
return fmt.Errorf("executor Shutdown failed: %v", err)
}
select {
case <-h.doneCh:
return nil
case <-time.After(h.killTimeout):
if h.pluginClient.Exited() {
return nil
}
if err := h.executor.Exit(); err != nil {
return fmt.Errorf("executor Exit failed: %v", err)
}
return nil
}
}
func (h *javaHandle) Stats() (*cstructs.TaskResourceUsage, error) {
return h.executor.Stats()
}
func (h *javaHandle) run() {
ps, err := h.executor.Wait()
close(h.doneCh)
if ps.ExitCode == 0 && err != nil {
if h.isolationConfig != nil {
ePid := h.pluginClient.ReattachConfig().Pid
if e := executor.ClientCleanup(h.isolationConfig, ePid); e != nil {
h.logger.Printf("[ERR] driver.java: destroying resource container failed: %v", e)
}
} else {
if e := killProcess(h.userPid); e != nil {
h.logger.Printf("[ERR] driver.java: error killing user process: %v", e)
}
}
if e := h.allocDir.UnmountAll(); e != nil {
h.logger.Printf("[ERR] driver.java: unmounting dev,proc and alloc dirs failed: %v", e)
}
}
h.waitCh <- &dstructs.WaitResult{ExitCode: ps.ExitCode, Signal: ps.Signal, Err: err}
close(h.waitCh)
// Remove services
if err := h.executor.DeregisterServices(); err != nil {
h.logger.Printf("[ERR] driver.java: failed to kill the deregister services: %v", err)
}
h.executor.Exit()
h.pluginClient.Kill()
}

View File

@ -0,0 +1,71 @@
package logging
import (
"log"
"github.com/hashicorp/nomad/client/allocdir"
cstructs "github.com/hashicorp/nomad/client/driver/structs"
"github.com/hashicorp/nomad/nomad/structs"
)
// LogCollectorContext holds context to configure the syslog server
type LogCollectorContext struct {
// TaskName is the name of the Task
TaskName string
// AllocDir is the handle to do operations on the alloc dir of
// the task
AllocDir *allocdir.AllocDir
// LogConfig provides configuration related to log rotation
LogConfig *structs.LogConfig
// PortUpperBound is the upper bound of the ports that we can use to start
// the syslog server
PortUpperBound uint
// PortLowerBound is the lower bound of the ports that we can use to start
// the syslog server
PortLowerBound uint
}
// SyslogCollectorState holds the address and isolation information of a launched
// syslog server
type SyslogCollectorState struct {
IsolationConfig *cstructs.IsolationConfig
Addr string
}
// LogCollector is an interface which allows a driver to launch a log server
// and update log configuration
type LogCollector interface {
LaunchCollector(ctx *LogCollectorContext) (*SyslogCollectorState, error)
Exit() error
UpdateLogConfig(logConfig *structs.LogConfig) error
}
// SyslogCollector is a LogCollector which starts a syslog server and rotates
// the incoming stream
type SyslogCollector struct {
}
// NewSyslogCollector returns an implementation of the SyslogCollector
func NewSyslogCollector(logger *log.Logger) *SyslogCollector {
return &SyslogCollector{}
}
// LaunchCollector launches a new syslog server, writes incoming log lines to
// files and rotates them
func (s *SyslogCollector) LaunchCollector(ctx *LogCollectorContext) (*SyslogCollectorState, error) {
return nil, nil
}
// Exit kills the syslog server
func (s *SyslogCollector) Exit() error {
return nil
}
// UpdateLogConfig updates the log configuration
func (s *SyslogCollector) UpdateLogConfig(logConfig *structs.LogConfig) error {
return nil
}

View File

@ -0,0 +1,285 @@
package logging
import (
"bufio"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
)
const (
bufSize = 32768
flushDur = 100 * time.Millisecond
)
// FileRotator writes bytes to a rotated set of files
type FileRotator struct {
MaxFiles int // MaxFiles is the maximum number of rotated files allowed in a path
FileSize int64 // FileSize is the size a rotated file is allowed to grow
path string // path is the path on the file system where the rotated set of files are opened
baseFileName string // baseFileName is the base file name of the rotated files
logFileIdx int // logFileIdx is the current index of the rotated files
oldestLogFileIdx int // oldestLogFileIdx is the index of the oldest log file in a path
currentFile *os.File // currentFile is the file that is currently getting written
currentWr int64 // currentWr is the number of bytes written to the current file
bufw *bufio.Writer
bufLock sync.Mutex
flushTicker *time.Ticker
logger *log.Logger
purgeCh chan struct{}
doneCh chan struct{}
closed bool
closedLock sync.Mutex
}
// NewFileRotator returns a new file rotator
func NewFileRotator(path string, baseFile string, maxFiles int,
fileSize int64, logger *log.Logger) (*FileRotator, error) {
rotator := &FileRotator{
MaxFiles: maxFiles,
FileSize: fileSize,
path: path,
baseFileName: baseFile,
flushTicker: time.NewTicker(flushDur),
logger: logger,
purgeCh: make(chan struct{}, 1),
doneCh: make(chan struct{}, 1),
}
if err := rotator.lastFile(); err != nil {
return nil, err
}
go rotator.purgeOldFiles()
go rotator.flushPeriodically()
return rotator, nil
}
// Write writes a byte array to a file and rotates the file if its size reaches
// the maximum size the user has defined.
func (f *FileRotator) Write(p []byte) (n int, err error) {
n = 0
var nw int
for n < len(p) {
// Check if we still have space in the current file, otherwise close and
// open the next file
if f.currentWr >= f.FileSize {
f.flushBuffer()
f.currentFile.Close()
if err := f.nextFile(); err != nil {
f.logger.Printf("[ERROR] driver.rotator: error creating next file: %v", err)
return 0, err
}
}
// Calculate the remaining size on this file
remainingSize := f.FileSize - f.currentWr
// Check if the number of bytes that we have to write is less than the
// remaining size of the file
if remainingSize < int64(len(p[n:])) {
// Write the number of bytes that we can write on the current file
li := int64(n) + remainingSize
nw, err = f.writeToBuffer(p[n:li])
} else {
// Write all the bytes in the current file
nw, err = f.writeToBuffer(p[n:])
}
// Increment the number of bytes written so far in this method
// invocation
n += nw
// Increment the total number of bytes written to the current file
f.currentWr += int64(nw)
if err != nil {
f.logger.Printf("[ERROR] driver.rotator: error writing to file: %v", err)
return
}
}
return
}
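// Worked example (illustrative): with FileSize = 10 and an empty current file
// task.stdout.0, writing a 25-byte payload stores 10 bytes in task.stdout.0,
// rotates, stores 10 bytes in task.stdout.1, rotates again, and leaves the
// final 5 bytes in task.stdout.2.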
// nextFile opens the next file and purges older files if the number of rotated
// files is larger than the maximum files configured by the user
func (f *FileRotator) nextFile() error {
nextFileIdx := f.logFileIdx
for {
nextFileIdx += 1
logFileName := filepath.Join(f.path, fmt.Sprintf("%s.%d", f.baseFileName, nextFileIdx))
if fi, err := os.Stat(logFileName); err == nil {
if fi.IsDir() || fi.Size() >= f.FileSize {
continue
}
}
f.logFileIdx = nextFileIdx
if err := f.createFile(); err != nil {
return err
}
break
}
// Purge old files if we have more files than MaxFiles
f.closedLock.Lock()
defer f.closedLock.Unlock()
if f.logFileIdx-f.oldestLogFileIdx >= f.MaxFiles && !f.closed {
select {
case f.purgeCh <- struct{}{}:
default:
}
}
return nil
}
// lastFile finds the rotated file with the largest index in a path.
func (f *FileRotator) lastFile() error {
finfos, err := ioutil.ReadDir(f.path)
if err != nil {
return err
}
prefix := fmt.Sprintf("%s.", f.baseFileName)
for _, fi := range finfos {
if fi.IsDir() {
continue
}
if strings.HasPrefix(fi.Name(), prefix) {
fileIdx := strings.TrimPrefix(fi.Name(), prefix)
n, err := strconv.Atoi(fileIdx)
if err != nil {
continue
}
if n > f.logFileIdx {
f.logFileIdx = n
}
}
}
if err := f.createFile(); err != nil {
return err
}
return nil
}
// createFile opens a new or existing file for writing
func (f *FileRotator) createFile() error {
logFileName := filepath.Join(f.path, fmt.Sprintf("%s.%d", f.baseFileName, f.logFileIdx))
cFile, err := os.OpenFile(logFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
return err
}
f.currentFile = cFile
fi, err := f.currentFile.Stat()
if err != nil {
return err
}
f.currentWr = fi.Size()
f.createOrResetBuffer()
return nil
}
// flushPeriodically flushes the buffered writer every 100ms to the underlying
// file
func (f *FileRotator) flushPeriodically() {
for range f.flushTicker.C {
f.flushBuffer()
}
}
func (f *FileRotator) Close() {
f.closedLock.Lock()
defer f.closedLock.Unlock()
// Stop the ticker and flush for one last time
f.flushTicker.Stop()
f.flushBuffer()
// Stop the purge goroutine
if !f.closed {
f.doneCh <- struct{}{}
close(f.purgeCh)
f.closed = true
}
}
// purgeOldFiles removes older files and keeps only the last N files rotated for
// a file
func (f *FileRotator) purgeOldFiles() {
for {
select {
case <-f.purgeCh:
var fIndexes []int
files, err := ioutil.ReadDir(f.path)
if err != nil {
return
}
// Inserting all the rotated files in a slice
for _, fi := range files {
if strings.HasPrefix(fi.Name(), f.baseFileName) {
fileIdx := strings.TrimPrefix(fi.Name(), fmt.Sprintf("%s.", f.baseFileName))
n, err := strconv.Atoi(fileIdx)
if err != nil {
continue
}
fIndexes = append(fIndexes, n)
}
}
// Not continuing to delete files if the number of files is not more
// than MaxFiles
if len(fIndexes) <= f.MaxFiles {
continue
}
// Sorting the file indexes so that we can purge the older files and keep
// only the number of files as configured by the user
sort.Sort(sort.IntSlice(fIndexes))
toDelete := fIndexes[0 : len(fIndexes)-f.MaxFiles]
for _, fIndex := range toDelete {
fname := filepath.Join(f.path, fmt.Sprintf("%s.%d", f.baseFileName, fIndex))
os.RemoveAll(fname)
}
f.oldestLogFileIdx = fIndexes[0]
case <-f.doneCh:
return
}
}
}
// flushBuffer flushes the buffer
func (f *FileRotator) flushBuffer() error {
f.bufLock.Lock()
defer f.bufLock.Unlock()
if f.bufw != nil {
return f.bufw.Flush()
}
return nil
}
// writeToBuffer writes the byte array to the buffer
func (f *FileRotator) writeToBuffer(p []byte) (int, error) {
f.bufLock.Lock()
defer f.bufLock.Unlock()
return f.bufw.Write(p)
}
// createOrResetBuffer creates a new buffer if we don't have one, otherwise it
// resets the existing buffer
func (f *FileRotator) createOrResetBuffer() {
f.bufLock.Lock()
defer f.bufLock.Unlock()
if f.bufw == nil {
f.bufw = bufio.NewWriterSize(f.currentFile, bufSize)
} else {
f.bufw.Reset(f.currentFile)
}
}

View File

@ -0,0 +1,158 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris

package logging
import (
"fmt"
"log"
"log/syslog"
"strconv"
)
// Errors related to parsing priority
var (
ErrPriorityNoStart = fmt.Errorf("No start char found for priority")
ErrPriorityEmpty = fmt.Errorf("Priority field empty")
ErrPriorityNoEnd = fmt.Errorf("No end char found for priority")
ErrPriorityTooShort = fmt.Errorf("Priority field too short")
ErrPriorityTooLong = fmt.Errorf("Priority field too long")
ErrPriorityNonDigit = fmt.Errorf("Non digit found in priority")
)
// Priority header and ending characters
const (
PRI_PART_START = '<'
PRI_PART_END = '>'
)
// SyslogMessage represents a log line received
type SyslogMessage struct {
Message []byte
Severity syslog.Priority
}
// Priority holds all the priority bits in a syslog log line
type Priority struct {
Pri int
Facility syslog.Priority
Severity syslog.Priority
}
// DockerLogParser parses a log line shipped by the docker daemon
type DockerLogParser struct {
logger *log.Logger
}
// NewDockerLogParser creates a new DockerLogParser
func NewDockerLogParser(logger *log.Logger) *DockerLogParser {
return &DockerLogParser{logger: logger}
}
// Parse parses a syslog log line
func (d *DockerLogParser) Parse(line []byte) *SyslogMessage {
pri, _, _ := d.parsePriority(line)
msgIdx := d.logContentIndex(line)
// Create a copy of the line so that subsequent Scans do not overwrite the
// message
lineCopy := make([]byte, len(line[msgIdx:]))
copy(lineCopy, line[msgIdx:])
return &SyslogMessage{
Severity: pri.Severity,
Message: lineCopy,
}
}
// logContentIndex finds the index at which the message content starts in a
// syslog line
func (d *DockerLogParser) logContentIndex(line []byte) int {
cursor := 0
numSpace := 0
numColons := 0
// first look for at least 2 colons. This matches into the date that has no more spaces in it
// DefaultFormatter log lines look like: '<30>2016-07-06T15:13:11Z00:00 hostname docker/9648c64f5037[16200]'
// UnixFormatter log lines look like: '<30>Jul 6 15:13:11 docker/9648c64f5037[16200]'
for i := 0; i < len(line); i++ {
if line[i] == ':' {
numColons += 1
if numColons == 2 {
cursor = i
break
}
}
}
// then look for the next space
for i := cursor; i < len(line); i++ {
if line[i] == ' ' {
numSpace += 1
if numSpace == 1 {
cursor = i
break
}
}
}
// then the colon is what separates it, followed by a space
for i := cursor; i < len(line); i++ {
if line[i] == ':' && i+1 < len(line) && line[i+1] == ' ' {
cursor = i + 1
break
}
}
// return the cursor to the next character
return cursor + 1
}
// parsePriority parses the priority in a syslog message
func (d *DockerLogParser) parsePriority(line []byte) (Priority, int, error) {
cursor := 0
pri := d.newPriority(0)
if len(line) == 0 {
return pri, cursor, ErrPriorityEmpty
}
if line[cursor] != PRI_PART_START {
return pri, cursor, ErrPriorityNoStart
}
i := 1
priDigit := 0
for i < len(line) {
if i >= 5 {
return pri, cursor, ErrPriorityTooLong
}
c := line[i]
if c == PRI_PART_END {
if i == 1 {
return pri, cursor, ErrPriorityTooShort
}
cursor = i + 1
return d.newPriority(priDigit), cursor, nil
}
if d.isDigit(c) {
v, e := strconv.Atoi(string(c))
if e != nil {
return pri, cursor, e
}
priDigit = (priDigit * 10) + v
} else {
return pri, cursor, ErrPriorityNonDigit
}
i++
}
return pri, cursor, ErrPriorityNoEnd
}
// isDigit checks if a byte is a numeric char
func (d *DockerLogParser) isDigit(c byte) bool {
return c >= '0' && c <= '9'
}
// newPriority creates a new default priority
func (d *DockerLogParser) newPriority(p int) Priority {
// The Priority value is calculated by first multiplying the Facility
// number by 8 and then adding the numerical value of the Severity.
return Priority{
Pri: p,
Facility: syslog.Priority(p / 8),
Severity: syslog.Priority(p % 8),
}
}
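A worked example of the facility/severity arithmetic described above, as a standalone sketch using only the standard library: docker's daemon.info lines start with "<30>" because facility 3 (daemon) times 8 plus severity 6 (info) is 30, and the parser recovers the two parts with integer division and modulo.

package main

import (
    "fmt"
    "log/syslog"
)

func main() {
    pri := 30 // as parsed from a "<30>..." line

    // newPriority stores the un-shifted facility number (pri / 8), which
    // corresponds to syslog.LOG_DAEMON >> 3 in Go's log/syslog constants.
    facility := syslog.Priority(pri / 8) // 3
    severity := syslog.Priority(pri % 8) // 6

    fmt.Println(facility == syslog.LOG_DAEMON>>3) // true
    fmt.Println(severity == syslog.LOG_INFO)      // true
}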

View File

@ -0,0 +1,86 @@
// +build !windows
package logging
import (
"bufio"
"log"
"net"
"sync"
)
// SyslogServer is a server which listens to syslog messages and parses them
type SyslogServer struct {
listener net.Listener
messages chan *SyslogMessage
parser *DockerLogParser
doneCh chan interface{}
done bool
doneLock sync.Mutex
logger *log.Logger
}
// NewSyslogServer creates a new syslog server
func NewSyslogServer(l net.Listener, messages chan *SyslogMessage, logger *log.Logger) *SyslogServer {
parser := NewDockerLogParser(logger)
return &SyslogServer{
listener: l,
messages: messages,
parser: parser,
logger: logger,
doneCh: make(chan interface{}),
}
}
// Start starts accepting syslog connections
func (s *SyslogServer) Start() {
for {
select {
case <-s.doneCh:
s.listener.Close()
return
default:
connection, err := s.listener.Accept()
if err != nil {
s.logger.Printf("[ERR] logcollector.server: error in accepting connection: %v", err)
continue
}
go s.read(connection)
}
}
}
// read reads the bytes from a connection
func (s *SyslogServer) read(connection net.Conn) {
defer connection.Close()
scanner := bufio.NewScanner(bufio.NewReader(connection))
for {
select {
case <-s.doneCh:
return
default:
}
if scanner.Scan() {
b := scanner.Bytes()
msg := s.parser.Parse(b)
s.messages <- msg
} else {
return
}
}
}
// Shutdown shuts down the syslog server
func (s *SyslogServer) Shutdown() {
s.doneLock.Lock()
defer s.doneLock.Unlock()
if !s.done {
close(s.doneCh)
close(s.messages)
s.done = true
}
}
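To show how the pieces fit together, a hedged wiring sketch, again assuming it runs inside this logging package. The collector normally supplies a Unix socket or a scanned TCP port, but any net.Listener works; the loopback address here is illustrative.

package logging

import (
    "log"
    "net"
    "os"
)

func ExampleSyslogServer() {
    logger := log.New(os.Stderr, "", log.LstdFlags)

    l, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        logger.Fatalf("listen failed: %v", err)
    }

    messages := make(chan *SyslogMessage, 64)
    srv := NewSyslogServer(l, messages, logger)
    go srv.Start()

    // Each accepted line arrives parsed, with its severity attached.
    go func() {
        for msg := range messages {
            logger.Printf("severity=%v message=%s", msg.Severity, msg.Message)
        }
    }()

    // ... send some syslog lines to l.Addr() ...

    // Shutdown signals the accept loop and closes the messages channel,
    // which ends the consumer goroutine above.
    srv.Shutdown()
}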

View File

@ -0,0 +1,10 @@
package logging
type SyslogServer struct {
}
func (s *SyslogServer) Shutdown() {
}
type SyslogMessage struct {
}

View File

@ -0,0 +1,207 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package logging
import (
"fmt"
"io"
"io/ioutil"
"log"
"log/syslog"
"net"
"os"
"runtime"
"github.com/hashicorp/nomad/client/allocdir"
cstructs "github.com/hashicorp/nomad/client/driver/structs"
"github.com/hashicorp/nomad/nomad/structs"
)
// LogCollectorContext holds context to configure the syslog server
type LogCollectorContext struct {
// TaskName is the name of the Task
TaskName string
// AllocDir is the handle to do operations on the alloc dir of
// the task
AllocDir *allocdir.AllocDir
// LogConfig provides configuration related to log rotation
LogConfig *structs.LogConfig
// PortUpperBound is the upper bound of the ports that we can use to start
// the syslog server
PortUpperBound uint
// PortLowerBound is the lower bound of the ports that we can use to start
// the syslog server
PortLowerBound uint
}
// SyslogCollectorState holds the address and isolation information of a
// launched syslog server
type SyslogCollectorState struct {
IsolationConfig *cstructs.IsolationConfig
Addr string
}
// LogCollector is an interface which allows a driver to launch a log server
// and update log configuration
type LogCollector interface {
LaunchCollector(ctx *LogCollectorContext) (*SyslogCollectorState, error)
Exit() error
UpdateLogConfig(logConfig *structs.LogConfig) error
}
// SyslogCollector is a LogCollector which starts a syslog server and does
// rotation to incoming stream
type SyslogCollector struct {
addr net.Addr
logConfig *structs.LogConfig
ctx *LogCollectorContext
lro *FileRotator
lre *FileRotator
server *SyslogServer
syslogChan chan *SyslogMessage
taskDir string
logger *log.Logger
}
// NewSyslogCollector returns an implementation of the SyslogCollector
func NewSyslogCollector(logger *log.Logger) *SyslogCollector {
return &SyslogCollector{logger: logger, syslogChan: make(chan *SyslogMessage, 2048)}
}
// LaunchCollector launches a new syslog server, writes incoming log lines
// to files, and rotates them
func (s *SyslogCollector) LaunchCollector(ctx *LogCollectorContext) (*SyslogCollectorState, error) {
l, err := s.getListener(ctx.PortLowerBound, ctx.PortUpperBound)
if err != nil {
return nil, err
}
s.logger.Printf("[DEBUG] sylog-server: launching syslog server on addr: %v", l.Addr().String())
s.ctx = ctx
// configuring the task dir
if err := s.configureTaskDir(); err != nil {
return nil, err
}
s.server = NewSyslogServer(l, s.syslogChan, s.logger)
go s.server.Start()
logFileSize := int64(ctx.LogConfig.MaxFileSizeMB * 1024 * 1024)
lro, err := NewFileRotator(ctx.AllocDir.LogDir(), fmt.Sprintf("%v.stdout", ctx.TaskName),
ctx.LogConfig.MaxFiles, logFileSize, s.logger)
if err != nil {
return nil, err
}
s.lro = lro
lre, err := NewFileRotator(ctx.AllocDir.LogDir(), fmt.Sprintf("%v.stderr", ctx.TaskName),
ctx.LogConfig.MaxFiles, logFileSize, s.logger)
if err != nil {
return nil, err
}
s.lre = lre
go s.collectLogs(lre, lro)
syslogAddr := fmt.Sprintf("%s://%s", l.Addr().Network(), l.Addr().String())
return &SyslogCollectorState{Addr: syslogAddr}, nil
}
func (s *SyslogCollector) collectLogs(we io.Writer, wo io.Writer) {
for logParts := range s.syslogChan {
// If the severity of the log line is err we write to the stderr
// writer; all other messages go to the stdout writer
if logParts.Severity == syslog.LOG_ERR {
we.Write(logParts.Message)
we.Write([]byte{'\n'})
} else {
wo.Write(logParts.Message)
wo.Write([]byte{'\n'})
}
}
}
// Exit kills the syslog server
func (s *SyslogCollector) Exit() error {
s.server.Shutdown()
s.lre.Close()
s.lro.Close()
return nil
}
// UpdateLogConfig updates the log configuration
func (s *SyslogCollector) UpdateLogConfig(logConfig *structs.LogConfig) error {
s.ctx.LogConfig = logConfig
if s.lro == nil {
return fmt.Errorf("log rotator for stdout doesn't exist")
}
s.lro.MaxFiles = logConfig.MaxFiles
s.lro.FileSize = int64(logConfig.MaxFileSizeMB * 1024 * 1024)
if s.lre == nil {
return fmt.Errorf("log rotator for stderr doesn't exist")
}
s.lre.MaxFiles = logConfig.MaxFiles
s.lre.FileSize = int64(logConfig.MaxFileSizeMB * 1024 * 1024)
return nil
}
// configureTaskDir sets the task dir in the SyslogCollector
func (s *SyslogCollector) configureTaskDir() error {
taskDir, ok := s.ctx.AllocDir.TaskDirs[s.ctx.TaskName]
if !ok {
return fmt.Errorf("couldn't find task directory for task %v", s.ctx.TaskName)
}
s.taskDir = taskDir
return nil
}
// getListener returns a listener for the syslog server: a TCP listener on a
// free port between the given bounds on Windows, a Unix socket elsewhere
func (s *SyslogCollector) getListener(lowerBound uint, upperBound uint) (net.Listener, error) {
if runtime.GOOS == "windows" {
return s.listenerTCP(lowerBound, upperBound)
}
return s.listenerUnix()
}
// listenerTCP creates a TCP listener using an unused port between an upper and
// lower bound
func (s *SyslogCollector) listenerTCP(lowerBound uint, upperBound uint) (net.Listener, error) {
for i := lowerBound; i <= upperBound; i++ {
addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("localhost:%v", i))
if err != nil {
return nil, err
}
l, err := net.ListenTCP("tcp", addr)
if err != nil {
continue
}
return l, nil
}
return nil, fmt.Errorf("No free port found")
}
// listenerUnix creates a Unix domain socket
func (s *SyslogCollector) listenerUnix() (net.Listener, error) {
f, err := ioutil.TempFile("", "plugin")
if err != nil {
return nil, err
}
path := f.Name()
if err := f.Close(); err != nil {
return nil, err
}
if err := os.Remove(path); err != nil {
return nil, err
}
return net.Listen("unix", path)
}

View File

@ -0,0 +1,51 @@
package driver
import (
"io"
"log"
"net"
"github.com/hashicorp/go-plugin"
)
var HandshakeConfig = plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "NOMAD_PLUGIN_MAGIC_COOKIE",
MagicCookieValue: "e4327c2e01eabfd75a8a67adb114fb34a757d57eee7728d857a8cec6e91a7255",
}
func GetPluginMap(w io.Writer) map[string]plugin.Plugin {
e := new(ExecutorPlugin)
e.logger = log.New(w, "", log.LstdFlags)
s := new(SyslogCollectorPlugin)
s.logger = log.New(w, "", log.LstdFlags)
return map[string]plugin.Plugin{
"executor": e,
"syslogcollector": s,
}
}
// PluginReattachConfig is the config that we serialize, de-serialize and
// store on disk
type PluginReattachConfig struct {
Pid int
AddrNet string
AddrName string
}
// PluginConfig returns a plugin.ReattachConfig built from a PluginReattachConfig
func (c *PluginReattachConfig) PluginConfig() *plugin.ReattachConfig {
var addr net.Addr
switch c.AddrNet {
case "unix", "unixgram", "unixpacket":
addr, _ = net.ResolveUnixAddr(c.AddrNet, c.AddrName)
case "tcp", "tcp4", "tcp6":
addr, _ = net.ResolveTCPAddr(c.AddrNet, c.AddrName)
}
return &plugin.ReattachConfig{Pid: c.Pid, Addr: addr}
}
func NewPluginReattachConfig(c *plugin.ReattachConfig) *PluginReattachConfig {
return &PluginReattachConfig{Pid: c.Pid, AddrNet: c.Addr.Network(), AddrName: c.Addr.String()}
}
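A small round-trip sketch (hypothetical pid and address) showing why the flattened form exists: plugin.ReattachConfig carries a live net.Addr, which does not survive JSON, while PluginReattachConfig stores the network and address as strings. Assumes the code sits inside this driver package.

package driver

import (
    "encoding/json"
    "fmt"
    "net"

    "github.com/hashicorp/go-plugin"
)

func ExamplePluginReattachConfig() {
    addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:4000")
    orig := &plugin.ReattachConfig{Pid: 4242, Addr: addr}

    // Flatten for storage on disk...
    blob, _ := json.Marshal(NewPluginReattachConfig(orig))

    // ...and rebuild a *plugin.ReattachConfig from the stored form.
    var stored PluginReattachConfig
    json.Unmarshal(blob, &stored)
    restored := stored.PluginConfig()
    fmt.Printf("pid=%d addr=%v\n", restored.Pid, restored.Addr)
}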

412
vendor/github.com/hashicorp/nomad/client/driver/qemu.go generated vendored Normal file
View File

@ -0,0 +1,412 @@
package driver
import (
"encoding/json"
"fmt"
"log"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"time"
"github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/executor"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
"github.com/hashicorp/nomad/client/fingerprint"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/helper/discover"
"github.com/hashicorp/nomad/helper/fields"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/mitchellh/mapstructure"
)
var (
reQemuVersion = regexp.MustCompile(`version (\d[\.\d+]+)`)
)
const (
// The key populated in Node Attributes to indicate presence of the Qemu
// driver
qemuDriverAttr = "driver.qemu"
)
// QemuDriver is a driver for running images via Qemu
// We attempt to choose sane defaults for now, with more configuration options
// planned for the future
type QemuDriver struct {
DriverContext
fingerprint.StaticFingerprinter
}
type QemuDriverConfig struct {
ImagePath string `mapstructure:"image_path"`
Accelerator string `mapstructure:"accelerator"`
PortMap []map[string]int `mapstructure:"port_map"` // A map of host port labels to guest ports.
Args []string `mapstructure:"args"` // extra arguments to qemu executable
}
// qemuHandle is returned from Start/Open as a handle to the PID
type qemuHandle struct {
pluginClient *plugin.Client
userPid int
executor executor.Executor
allocDir *allocdir.AllocDir
killTimeout time.Duration
maxKillTimeout time.Duration
logger *log.Logger
version string
waitCh chan *dstructs.WaitResult
doneCh chan struct{}
}
// NewQemuDriver is used to create a new qemu driver
func NewQemuDriver(ctx *DriverContext) Driver {
return &QemuDriver{DriverContext: *ctx}
}
// Validate is used to validate the driver configuration
func (d *QemuDriver) Validate(config map[string]interface{}) error {
fd := &fields.FieldData{
Raw: config,
Schema: map[string]*fields.FieldSchema{
"image_path": &fields.FieldSchema{
Type: fields.TypeString,
Required: true,
},
"accelerator": &fields.FieldSchema{
Type: fields.TypeString,
},
"port_map": &fields.FieldSchema{
Type: fields.TypeArray,
},
"args": &fields.FieldSchema{
Type: fields.TypeArray,
},
},
}
if err := fd.Validate(); err != nil {
return err
}
return nil
}
func (d *QemuDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
// Get the current status so that we can log any debug messages only if the
// state changes
_, currentlyEnabled := node.Attributes[qemuDriverAttr]
bin := "qemu-system-x86_64"
if runtime.GOOS == "windows" {
// On windows, the "qemu-system-x86_64" command does not respond to the
// version flag.
bin = "qemu-img"
}
outBytes, err := exec.Command(bin, "--version").Output()
if err != nil {
delete(node.Attributes, qemuDriverAttr)
return false, nil
}
out := strings.TrimSpace(string(outBytes))
matches := reQemuVersion.FindStringSubmatch(out)
if len(matches) != 2 {
delete(node.Attributes, qemuDriverAttr)
return false, fmt.Errorf("Unable to parse Qemu version string: %#v", matches)
}
if !currentlyEnabled {
d.logger.Printf("[DEBUG] driver.qemu: enabling driver")
}
node.Attributes[qemuDriverAttr] = "1"
node.Attributes["driver.qemu.version"] = matches[1]
return true, nil
}
// Start runs an existing Qemu image: it pulls down an existing, valid Qemu
// image and saves it to the driver's allocation dir
func (d *QemuDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
var driverConfig QemuDriverConfig
if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
return nil, err
}
if len(driverConfig.PortMap) > 1 {
return nil, fmt.Errorf("Only one port_map block is allowed in the qemu driver config")
}
// Get the image source
vmPath := driverConfig.ImagePath
if vmPath == "" {
return nil, fmt.Errorf("image_path must be set")
}
vmID := filepath.Base(vmPath)
// Get the tasks local directory.
taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
// Parse configuration arguments
// Create the base arguments
accelerator := "tcg"
if driverConfig.Accelerator != "" {
accelerator = driverConfig.Accelerator
}
// TODO: Check a lower bound, e.g. Qemu's default of 128
mem := fmt.Sprintf("%dM", task.Resources.MemoryMB)
absPath, err := GetAbsolutePath("qemu-system-x86_64")
if err != nil {
return nil, err
}
args := []string{
absPath,
"-machine", "type=pc,accel=" + accelerator,
"-name", vmID,
"-m", mem,
"-drive", "file=" + vmPath,
"-nographic",
}
// Add pass through arguments to qemu executable. A user can specify
// these arguments in driver task configuration. These arguments are
// passed directly to the qemu driver as command line options.
// For example, args = [ "-nodefconfig", "-nodefaults" ]
// This will allow a VM with embedded configuration to boot successfully.
args = append(args, driverConfig.Args...)
// Check the Resources required Networks to add port mappings. If no resources
// are required, we assume the VM is a purely compute job and does not require
// the outside world to be able to reach it. VMs run without port mappings can
// still reach out to the world, but without port mappings it is effectively
// firewalled
protocols := []string{"udp", "tcp"}
if len(task.Resources.Networks) > 0 && len(driverConfig.PortMap) == 1 {
// Loop through the port map and construct the hostfwd string, to map
// reserved ports to the ports listening in the VM
// Ex: hostfwd=tcp::22000-:22,hostfwd=tcp::80-:8080
var forwarding []string
taskPorts := task.Resources.Networks[0].MapLabelToValues(nil)
for label, guest := range driverConfig.PortMap[0] {
host, ok := taskPorts[label]
if !ok {
return nil, fmt.Errorf("Unknown port label %q", label)
}
for _, p := range protocols {
forwarding = append(forwarding, fmt.Sprintf("hostfwd=%s::%d-:%d", p, host, guest))
}
}
if len(forwarding) != 0 {
args = append(args,
"-netdev",
fmt.Sprintf("user,id=user.0,%s", strings.Join(forwarding, ",")),
"-device", "virtio-net,netdev=user.0",
)
}
}
// If using KVM, add optimization args
if accelerator == "kvm" {
args = append(args,
"-enable-kvm",
"-cpu", "host",
// Do we have cores information available to the Driver?
// "-smp", fmt.Sprintf("%d", cores),
)
}
d.logger.Printf("[DEBUG] Starting QemuVM command: %q", strings.Join(args, " "))
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
Driver: "qemu",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
Task: task,
}
ps, err := exec.LaunchCmd(&executor.ExecCommand{
Cmd: args[0],
Args: args[1:],
User: task.User,
}, executorCtx)
if err != nil {
pluginClient.Kill()
return nil, err
}
d.logger.Printf("[INFO] Started new QemuVM: %s", vmID)
// Create and Return Handle
maxKill := d.DriverContext.config.MaxKillTimeout
h := &qemuHandle{
pluginClient: pluginClient,
executor: exec,
userPid: ps.Pid,
allocDir: ctx.AllocDir,
killTimeout: GetKillTimeout(task.KillTimeout, maxKill),
maxKillTimeout: maxKill,
version: d.config.Version,
logger: d.logger,
doneCh: make(chan struct{}),
waitCh: make(chan *dstructs.WaitResult, 1),
}
if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
h.logger.Printf("[ERR] driver.qemu: error registering services for task: %q: %v", task.Name, err)
}
go h.run()
return h, nil
}
type qemuId struct {
Version string
KillTimeout time.Duration
MaxKillTimeout time.Duration
UserPid int
PluginConfig *PluginReattachConfig
AllocDir *allocdir.AllocDir
}
func (d *QemuDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {
id := &qemuId{}
if err := json.Unmarshal([]byte(handleID), id); err != nil {
return nil, fmt.Errorf("Failed to parse handle '%s': %v", handleID, err)
}
pluginConfig := &plugin.ClientConfig{
Reattach: id.PluginConfig.PluginConfig(),
}
exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
d.logger.Println("[ERR] driver.qemu: error connecting to plugin so destroying plugin pid and user pid")
if e := destroyPlugin(id.PluginConfig.Pid, id.UserPid); e != nil {
d.logger.Printf("[ERR] driver.qemu: error destroying plugin and userpid: %v", e)
}
return nil, fmt.Errorf("error connecting to plugin: %v", err)
}
ver, _ := exec.Version()
d.logger.Printf("[DEBUG] driver.qemu: version of executor: %v", ver.Version)
// Return a driver handle
h := &qemuHandle{
pluginClient: pluginClient,
executor: exec,
userPid: id.UserPid,
allocDir: id.AllocDir,
logger: d.logger,
killTimeout: id.KillTimeout,
maxKillTimeout: id.MaxKillTimeout,
version: id.Version,
doneCh: make(chan struct{}),
waitCh: make(chan *dstructs.WaitResult, 1),
}
if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
h.logger.Printf("[ERR] driver.qemu: error registering services: %v", err)
}
go h.run()
return h, nil
}
func (h *qemuHandle) ID() string {
id := qemuId{
Version: h.version,
KillTimeout: h.killTimeout,
MaxKillTimeout: h.maxKillTimeout,
PluginConfig: NewPluginReattachConfig(h.pluginClient.ReattachConfig()),
UserPid: h.userPid,
AllocDir: h.allocDir,
}
data, err := json.Marshal(id)
if err != nil {
h.logger.Printf("[ERR] driver.qemu: failed to marshal ID to JSON: %s", err)
}
return string(data)
}
func (h *qemuHandle) WaitCh() chan *dstructs.WaitResult {
return h.waitCh
}
func (h *qemuHandle) Update(task *structs.Task) error {
// Store the updated kill timeout.
h.killTimeout = GetKillTimeout(task.KillTimeout, h.maxKillTimeout)
h.executor.UpdateTask(task)
// Update is not possible
return nil
}
// TODO: allow a 'shutdown_command' that can be executed over an SSH connection
// to the VM
func (h *qemuHandle) Kill() error {
if err := h.executor.ShutDown(); err != nil {
if h.pluginClient.Exited() {
return nil
}
return fmt.Errorf("executor Shutdown failed: %v", err)
}
select {
case <-h.doneCh:
return nil
case <-time.After(h.killTimeout):
if h.pluginClient.Exited() {
return nil
}
if err := h.executor.Exit(); err != nil {
return fmt.Errorf("executor Exit failed: %v", err)
}
return nil
}
}
func (h *qemuHandle) Stats() (*cstructs.TaskResourceUsage, error) {
return h.executor.Stats()
}
func (h *qemuHandle) run() {
ps, err := h.executor.Wait()
if ps.ExitCode == 0 && err != nil {
if e := killProcess(h.userPid); e != nil {
h.logger.Printf("[ERR] driver.qemu: error killing user process: %v", e)
}
if e := h.allocDir.UnmountAll(); e != nil {
h.logger.Printf("[ERR] driver.qemu: unmounting dev,proc and alloc dirs failed: %v", e)
}
}
close(h.doneCh)
h.waitCh <- &dstructs.WaitResult{ExitCode: ps.ExitCode, Signal: ps.Signal, Err: err}
close(h.waitCh)
// Remove services
if err := h.executor.DeregisterServices(); err != nil {
h.logger.Printf("[ERR] driver.qemu: failed to deregister services: %v", err)
}
h.executor.Exit()
h.pluginClient.Kill()
}
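The hostfwd construction in Start above is easiest to see with concrete values. A standalone sketch with a hypothetical port_map label and a hypothetical host port that Nomad reserved for it:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // label -> guest port, from the task's port_map config (hypothetical).
    portMap := map[string]int{"ssh": 22}
    // label -> host port reserved for the task (hypothetical).
    taskPorts := map[string]int{"ssh": 22000}

    var forwarding []string
    for label, guest := range portMap {
        host := taskPorts[label]
        // One forwarding rule per protocol, mirroring the driver's loop.
        for _, p := range []string{"udp", "tcp"} {
            forwarding = append(forwarding, fmt.Sprintf("hostfwd=%s::%d-:%d", p, host, guest))
        }
    }

    // Prints: -netdev user,id=user.0,hostfwd=udp::22000-:22,hostfwd=tcp::22000-:22
    fmt.Println("-netdev", fmt.Sprintf("user,id=user.0,%s", strings.Join(forwarding, ",")))
}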

View File

@ -0,0 +1,307 @@
package driver
import (
"encoding/json"
"fmt"
"log"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/executor"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
"github.com/hashicorp/nomad/client/fingerprint"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/helper/discover"
"github.com/hashicorp/nomad/helper/fields"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/mitchellh/mapstructure"
)
const (
// The option that enables this driver in the Config.Options map.
rawExecConfigOption = "driver.raw_exec.enable"
// The key populated in Node Attributes to indicate presence of the Raw Exec
// driver
rawExecDriverAttr = "driver.raw_exec"
)
// The RawExecDriver is a privileged version of the exec driver. It provides no
// resource isolation and just fork/execs. The Exec driver should be preferred
// and this should only be used when explicitly needed.
type RawExecDriver struct {
DriverContext
fingerprint.StaticFingerprinter
}
// rawExecHandle is returned from Start/Open as a handle to the PID
type rawExecHandle struct {
version string
pluginClient *plugin.Client
userPid int
executor executor.Executor
killTimeout time.Duration
maxKillTimeout time.Duration
allocDir *allocdir.AllocDir
logger *log.Logger
waitCh chan *dstructs.WaitResult
doneCh chan struct{}
}
// NewRawExecDriver is used to create a new raw exec driver
func NewRawExecDriver(ctx *DriverContext) Driver {
return &RawExecDriver{DriverContext: *ctx}
}
// Validate is used to validate the driver configuration
func (d *RawExecDriver) Validate(config map[string]interface{}) error {
fd := &fields.FieldData{
Raw: config,
Schema: map[string]*fields.FieldSchema{
"command": &fields.FieldSchema{
Type: fields.TypeString,
Required: true,
},
"args": &fields.FieldSchema{
Type: fields.TypeArray,
},
},
}
if err := fd.Validate(); err != nil {
return err
}
return nil
}
func (d *RawExecDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
// Get the current status so that we can log any debug messages only if the
// state changes
_, currentlyEnabled := node.Attributes[rawExecDriverAttr]
// Check that the user has explicitly enabled this executor.
enabled := cfg.ReadBoolDefault(rawExecConfigOption, false)
if enabled {
if currentlyEnabled {
d.logger.Printf("[WARN] driver.raw_exec: raw exec is enabled. Only enable if needed")
}
node.Attributes[rawExecDriverAttr] = "1"
return true, nil
}
delete(node.Attributes, rawExecDriverAttr)
return false, nil
}
func (d *RawExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
var driverConfig ExecDriverConfig
if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
return nil, err
}
// Get the tasks local directory.
taskName := d.DriverContext.taskName
taskDir, ok := ctx.AllocDir.TaskDirs[taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
// Get the command to be run
command := driverConfig.Command
if err := validateCommand(command, "args"); err != nil {
return nil, err
}
// Set the host environment variables.
filter := strings.Split(d.config.ReadDefault("env.blacklist", config.DefaultEnvBlacklist), ",")
d.taskEnv.AppendHostEnvvars(filter)
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
Driver: "raw_exec",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
Task: task,
}
ps, err := exec.LaunchCmd(&executor.ExecCommand{
Cmd: command,
Args: driverConfig.Args,
User: task.User,
}, executorCtx)
if err != nil {
pluginClient.Kill()
return nil, err
}
d.logger.Printf("[DEBUG] driver.raw_exec: started process with pid: %v", ps.Pid)
// Return a driver handle
maxKill := d.DriverContext.config.MaxKillTimeout
h := &rawExecHandle{
pluginClient: pluginClient,
executor: exec,
userPid: ps.Pid,
killTimeout: GetKillTimeout(task.KillTimeout, maxKill),
maxKillTimeout: maxKill,
allocDir: ctx.AllocDir,
version: d.config.Version,
logger: d.logger,
doneCh: make(chan struct{}),
waitCh: make(chan *dstructs.WaitResult, 1),
}
if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
h.logger.Printf("[ERR] driver.raw_exec: error registering services with consul for task: %q: %v", task.Name, err)
}
go h.run()
return h, nil
}
type rawExecId struct {
Version string
KillTimeout time.Duration
MaxKillTimeout time.Duration
UserPid int
PluginConfig *PluginReattachConfig
AllocDir *allocdir.AllocDir
}
func (d *RawExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {
id := &rawExecId{}
if err := json.Unmarshal([]byte(handleID), id); err != nil {
return nil, fmt.Errorf("Failed to parse handle '%s': %v", handleID, err)
}
pluginConfig := &plugin.ClientConfig{
Reattach: id.PluginConfig.PluginConfig(),
}
exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
d.logger.Println("[ERR] driver.raw_exec: error connecting to plugin so destroying plugin pid and user pid")
if e := destroyPlugin(id.PluginConfig.Pid, id.UserPid); e != nil {
d.logger.Printf("[ERR] driver.raw_exec: error destroying plugin and userpid: %v", e)
}
return nil, fmt.Errorf("error connecting to plugin: %v", err)
}
ver, _ := exec.Version()
d.logger.Printf("[DEBUG] driver.raw_exec: version of executor: %v", ver.Version)
// Return a driver handle
h := &rawExecHandle{
pluginClient: pluginClient,
executor: exec,
userPid: id.UserPid,
logger: d.logger,
killTimeout: id.KillTimeout,
maxKillTimeout: id.MaxKillTimeout,
allocDir: id.AllocDir,
version: id.Version,
doneCh: make(chan struct{}),
waitCh: make(chan *dstructs.WaitResult, 1),
}
if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
h.logger.Printf("[ERR] driver.raw_exec: error registering services with consul: %v", err)
}
go h.run()
return h, nil
}
func (h *rawExecHandle) ID() string {
id := rawExecId{
Version: h.version,
KillTimeout: h.killTimeout,
MaxKillTimeout: h.maxKillTimeout,
PluginConfig: NewPluginReattachConfig(h.pluginClient.ReattachConfig()),
UserPid: h.userPid,
AllocDir: h.allocDir,
}
data, err := json.Marshal(id)
if err != nil {
h.logger.Printf("[ERR] driver.raw_exec: failed to marshal ID to JSON: %s", err)
}
return string(data)
}
func (h *rawExecHandle) WaitCh() chan *dstructs.WaitResult {
return h.waitCh
}
func (h *rawExecHandle) Update(task *structs.Task) error {
// Store the updated kill timeout.
h.killTimeout = GetKillTimeout(task.KillTimeout, h.maxKillTimeout)
h.executor.UpdateTask(task)
// Update is not possible
return nil
}
func (h *rawExecHandle) Kill() error {
if err := h.executor.ShutDown(); err != nil {
if h.pluginClient.Exited() {
return nil
}
return fmt.Errorf("executor Shutdown failed: %v", err)
}
select {
case <-h.doneCh:
return nil
case <-time.After(h.killTimeout):
if h.pluginClient.Exited() {
return nil
}
if err := h.executor.Exit(); err != nil {
return fmt.Errorf("executor Exit failed: %v", err)
}
return nil
}
}
func (h *rawExecHandle) Stats() (*cstructs.TaskResourceUsage, error) {
return h.executor.Stats()
}
func (h *rawExecHandle) run() {
ps, err := h.executor.Wait()
close(h.doneCh)
if ps.ExitCode == 0 && err != nil {
if e := killProcess(h.userPid); e != nil {
h.logger.Printf("[ERR] driver.raw_exec: error killing user process: %v", e)
}
if e := h.allocDir.UnmountAll(); e != nil {
h.logger.Printf("[ERR] driver.raw_exec: unmounting dev,proc and alloc dirs failed: %v", e)
}
}
h.waitCh <- &dstructs.WaitResult{ExitCode: ps.ExitCode, Signal: ps.Signal, Err: err}
close(h.waitCh)
// Remove services
if err := h.executor.DeregisterServices(); err != nil {
h.logger.Printf("[ERR] driver.raw_exec: failed to deregister services: %v", err)
}
if err := h.executor.Exit(); err != nil {
h.logger.Printf("[ERR] driver.raw_exec: error killing executor: %v", err)
}
h.pluginClient.Kill()
}

436
vendor/github.com/hashicorp/nomad/client/driver/rkt.go generated vendored Normal file
View File

@ -0,0 +1,436 @@
package driver
import (
"bytes"
"encoding/json"
"fmt"
"log"
"net"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"syscall"
"time"
"github.com/hashicorp/go-plugin"
"github.com/hashicorp/go-version"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/executor"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
"github.com/hashicorp/nomad/client/fingerprint"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/helper/discover"
"github.com/hashicorp/nomad/helper/fields"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/mitchellh/mapstructure"
)
var (
reRktVersion = regexp.MustCompile(`rkt [vV]ersion[:]? (\d[.\d]+)`)
reAppcVersion = regexp.MustCompile(`appc [vV]ersion[:]? (\d[.\d]+)`)
)
const (
// minRktVersion is the earliest supported version of rkt. rkt added support
// for CPU and memory isolators in 0.14.0. We cannot support an earlier
// version in order to maintain a uniform interface across all drivers
minRktVersion = "0.14.0"
// The key populated in the Node Attributes to indicate the presence of the
// Rkt driver
rktDriverAttr = "driver.rkt"
)
// RktDriver is a driver for running images via Rkt
// We attempt to choose sane defaults for now, with more configuration options
// planned for the future
type RktDriver struct {
DriverContext
fingerprint.StaticFingerprinter
}
type RktDriverConfig struct {
ImageName string `mapstructure:"image"`
Command string `mapstructure:"command"`
Args []string `mapstructure:"args"`
TrustPrefix string `mapstructure:"trust_prefix"`
DNSServers []string `mapstructure:"dns_servers"` // DNS Server for containers
DNSSearchDomains []string `mapstructure:"dns_search_domains"` // DNS Search domains for containers
Debug bool `mapstructure:"debug"` // Enable debug option for rkt command
}
// rktHandle is returned from Start/Open as a handle to the PID
type rktHandle struct {
pluginClient *plugin.Client
executorPid int
executor executor.Executor
allocDir *allocdir.AllocDir
logger *log.Logger
killTimeout time.Duration
maxKillTimeout time.Duration
waitCh chan *dstructs.WaitResult
doneCh chan struct{}
}
// rktPID is a struct to map the pid running the process to the rkt image on
// disk
type rktPID struct {
PluginConfig *PluginReattachConfig
AllocDir *allocdir.AllocDir
ExecutorPid int
KillTimeout time.Duration
MaxKillTimeout time.Duration
}
// NewRktDriver is used to create a new rkt driver
func NewRktDriver(ctx *DriverContext) Driver {
return &RktDriver{DriverContext: *ctx}
}
// Validate is used to validate the driver configuration
func (d *RktDriver) Validate(config map[string]interface{}) error {
fd := &fields.FieldData{
Raw: config,
Schema: map[string]*fields.FieldSchema{
"image": &fields.FieldSchema{
Type: fields.TypeString,
Required: true,
},
"command": &fields.FieldSchema{
Type: fields.TypeString,
},
"args": &fields.FieldSchema{
Type: fields.TypeArray,
},
"trust_prefix": &fields.FieldSchema{
Type: fields.TypeString,
},
"dns_servers": &fields.FieldSchema{
Type: fields.TypeArray,
},
"dns_search_domains": &fields.FieldSchema{
Type: fields.TypeArray,
},
"debug": &fields.FieldSchema{
Type: fields.TypeBool,
},
},
}
if err := fd.Validate(); err != nil {
return err
}
return nil
}
func (d *RktDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
// Get the current status so that we can log any debug messages only if the
// state changes
_, currentlyEnabled := node.Attributes[rktDriverAttr]
// Only enable if we are root when running on non-windows systems.
if runtime.GOOS != "windows" && syscall.Geteuid() != 0 {
if currentlyEnabled {
d.logger.Printf("[DEBUG] driver.rkt: must run as root user, disabling")
}
delete(node.Attributes, rktDriverAttr)
return false, nil
}
outBytes, err := exec.Command("rkt", "version").Output()
if err != nil {
delete(node.Attributes, rktDriverAttr)
return false, nil
}
out := strings.TrimSpace(string(outBytes))
rktMatches := reRktVersion.FindStringSubmatch(out)
appcMatches := reAppcVersion.FindStringSubmatch(out)
if len(rktMatches) != 2 || len(appcMatches) != 2 {
delete(node.Attributes, rktDriverAttr)
return false, fmt.Errorf("Unable to parse Rkt version string: %#v", rktMatches)
}
node.Attributes[rktDriverAttr] = "1"
node.Attributes["driver.rkt.version"] = rktMatches[1]
node.Attributes["driver.rkt.appc.version"] = appcMatches[1]
minVersion, _ := version.NewVersion(minRktVersion)
currentVersion, _ := version.NewVersion(node.Attributes["driver.rkt.version"])
if currentVersion.LessThan(minVersion) {
// Do not allow rkt < 0.14.0
d.logger.Printf("[WARN] driver.rkt: please upgrade rkt to a version >= %s", minVersion)
node.Attributes[rktDriverAttr] = "0"
}
return true, nil
}
// Start runs an existing Rkt image.
func (d *RktDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
var driverConfig RktDriverConfig
if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
return nil, err
}
// ACI image
img := driverConfig.ImageName
// Get the tasks local directory.
taskName := d.DriverContext.taskName
taskDir, ok := ctx.AllocDir.TaskDirs[taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
// Build the command.
var cmdArgs []string
// Add debug option to rkt command.
debug := driverConfig.Debug
// Add the given trust prefix
trustPrefix := driverConfig.TrustPrefix
insecure := false
if trustPrefix != "" {
var outBuf, errBuf bytes.Buffer
cmd := exec.Command("rkt", "trust", "--skip-fingerprint-review=true", fmt.Sprintf("--prefix=%s", trustPrefix), fmt.Sprintf("--debug=%t", debug))
cmd.Stdout = &outBuf
cmd.Stderr = &errBuf
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("Error running rkt trust: %s\n\nOutput: %s\n\nError: %s",
err, outBuf.String(), errBuf.String())
}
d.logger.Printf("[DEBUG] driver.rkt: added trust prefix: %q", trustPrefix)
} else {
// Disable signature verification if the trust command was not run.
insecure = true
}
cmdArgs = append(cmdArgs, "run")
cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s", task.Name, ctx.AllocDir.SharedDir))
cmdArgs = append(cmdArgs, fmt.Sprintf("--mount=volume=%s,target=%s", task.Name, ctx.AllocDir.SharedDir))
cmdArgs = append(cmdArgs, img)
if insecure {
cmdArgs = append(cmdArgs, "--insecure-options=all")
}
cmdArgs = append(cmdArgs, fmt.Sprintf("--debug=%t", debug))
// Inject environment variables
for k, v := range d.taskEnv.EnvMap() {
cmdArgs = append(cmdArgs, fmt.Sprintf("--set-env=%v=%v", k, v))
}
// Check if the user has overridden the exec command.
if driverConfig.Command != "" {
cmdArgs = append(cmdArgs, fmt.Sprintf("--exec=%v", driverConfig.Command))
}
// Add memory isolator
cmdArgs = append(cmdArgs, fmt.Sprintf("--memory=%vM", int64(task.Resources.MemoryMB)))
// Add CPU isolator
cmdArgs = append(cmdArgs, fmt.Sprintf("--cpu=%vm", int64(task.Resources.CPU)))
// Add DNS servers
for _, ip := range driverConfig.DNSServers {
// net.ParseIP returns nil for an invalid address, so name the result
// accordingly instead of the misleading "err".
if parsed := net.ParseIP(ip); parsed == nil {
msg := fmt.Errorf("invalid ip address for container dns server %q", ip)
d.logger.Printf("[DEBUG] driver.rkt: %v", msg)
return nil, msg
}
cmdArgs = append(cmdArgs, fmt.Sprintf("--dns=%s", ip))
}
// set DNS search domains
for _, domain := range driverConfig.DNSSearchDomains {
cmdArgs = append(cmdArgs, fmt.Sprintf("--dns-search=%s", domain))
}
// Add user passed arguments.
if len(driverConfig.Args) != 0 {
parsed := d.taskEnv.ParseAndReplace(driverConfig.Args)
// Need to start arguments with "--"
if len(parsed) > 0 {
cmdArgs = append(cmdArgs, "--")
}
for _, arg := range parsed {
cmdArgs = append(cmdArgs, fmt.Sprintf("%v", arg))
}
}
// Set the host environment variables.
filter := strings.Split(d.config.ReadDefault("env.blacklist", config.DefaultEnvBlacklist), ",")
d.taskEnv.AppendHostEnvvars(filter)
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
execIntf, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
Driver: "rkt",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
Task: task,
}
absPath, err := GetAbsolutePath("rkt")
if err != nil {
return nil, err
}
ps, err := execIntf.LaunchCmd(&executor.ExecCommand{
Cmd: absPath,
Args: cmdArgs,
User: task.User,
}, executorCtx)
if err != nil {
pluginClient.Kill()
return nil, err
}
d.logger.Printf("[DEBUG] driver.rkt: started ACI %q with: %v", img, cmdArgs)
maxKill := d.DriverContext.config.MaxKillTimeout
h := &rktHandle{
pluginClient: pluginClient,
executor: execIntf,
executorPid: ps.Pid,
allocDir: ctx.AllocDir,
logger: d.logger,
killTimeout: GetKillTimeout(task.KillTimeout, maxKill),
maxKillTimeout: maxKill,
doneCh: make(chan struct{}),
waitCh: make(chan *dstructs.WaitResult, 1),
}
if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
h.logger.Printf("[ERR] driver.rkt: error registering services for task: %q: %v", task.Name, err)
}
go h.run()
return h, nil
}
func (d *RktDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {
// Parse the handle
pidBytes := []byte(strings.TrimPrefix(handleID, "Rkt:"))
id := &rktPID{}
if err := json.Unmarshal(pidBytes, id); err != nil {
return nil, fmt.Errorf("failed to parse Rkt handle '%s': %v", handleID, err)
}
pluginConfig := &plugin.ClientConfig{
Reattach: id.PluginConfig.PluginConfig(),
}
exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
d.logger.Println("[ERROR] driver.rkt: error connecting to plugin so destroying plugin pid and user pid")
if e := destroyPlugin(id.PluginConfig.Pid, id.ExecutorPid); e != nil {
d.logger.Printf("[ERROR] driver.rkt: error destroying plugin and executor pid: %v", e)
}
return nil, fmt.Errorf("error connecting to plugin: %v", err)
}
ver, _ := exec.Version()
d.logger.Printf("[DEBUG] driver.rkt: version of executor: %v", ver.Version)
// Return a driver handle
h := &rktHandle{
pluginClient: pluginClient,
executorPid: id.ExecutorPid,
allocDir: id.AllocDir,
executor: exec,
logger: d.logger,
killTimeout: id.KillTimeout,
maxKillTimeout: id.MaxKillTimeout,
doneCh: make(chan struct{}),
waitCh: make(chan *dstructs.WaitResult, 1),
}
if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil {
h.logger.Printf("[ERR] driver.rkt: error registering services: %v", err)
}
go h.run()
return h, nil
}
func (h *rktHandle) ID() string {
// Return a handle to the PID
pid := &rktPID{
PluginConfig: NewPluginReattachConfig(h.pluginClient.ReattachConfig()),
KillTimeout: h.killTimeout,
MaxKillTimeout: h.maxKillTimeout,
ExecutorPid: h.executorPid,
AllocDir: h.allocDir,
}
data, err := json.Marshal(pid)
if err != nil {
h.logger.Printf("[ERR] driver.rkt: failed to marshal rkt PID to JSON: %s", err)
}
return fmt.Sprintf("Rkt:%s", string(data))
}
func (h *rktHandle) WaitCh() chan *dstructs.WaitResult {
return h.waitCh
}
func (h *rktHandle) Update(task *structs.Task) error {
// Store the updated kill timeout.
h.killTimeout = GetKillTimeout(task.KillTimeout, h.maxKillTimeout)
h.executor.UpdateTask(task)
// Update is not possible
return nil
}
// Kill is used to terminate the task. We send an Interrupt
// and then provide a 5 second grace period before doing a Kill.
func (h *rktHandle) Kill() error {
h.executor.ShutDown()
select {
case <-h.doneCh:
return nil
case <-time.After(h.killTimeout):
return h.executor.Exit()
}
}
func (h *rktHandle) Stats() (*cstructs.TaskResourceUsage, error) {
return nil, fmt.Errorf("stats not implemented for rkt")
}
func (h *rktHandle) run() {
ps, err := h.executor.Wait()
close(h.doneCh)
if ps.ExitCode == 0 && err != nil {
if e := killProcess(h.executorPid); e != nil {
h.logger.Printf("[ERROR] driver.rkt: error killing user process: %v", e)
}
if e := h.allocDir.UnmountAll(); e != nil {
h.logger.Printf("[ERROR] driver.rkt: unmounting dev,proc and alloc dirs failed: %v", e)
}
}
h.waitCh <- dstructs.NewWaitResult(ps.ExitCode, 0, err)
close(h.waitCh)
// Remove services
if err := h.executor.DeregisterServices(); err != nil {
h.logger.Printf("[ERR] driver.rkt: failed to deregister services: %v", err)
}
if err := h.executor.Exit(); err != nil {
h.logger.Printf("[ERR] driver.rkt: error killing executor: %v", err)
}
h.pluginClient.Kill()
}

View File

@ -0,0 +1,77 @@
package structs
import (
"fmt"
"time"
)
const (
// The default user that the executor uses to run tasks
DefaultUnpriviledgedUser = "nobody"
// CheckBufSize is the size of the check output result
CheckBufSize = 4 * 1024
)
// WaitResult stores the result of a Wait operation.
type WaitResult struct {
ExitCode int
Signal int
Err error
}
func NewWaitResult(code, signal int, err error) *WaitResult {
return &WaitResult{
ExitCode: code,
Signal: signal,
Err: err,
}
}
func (r *WaitResult) Successful() bool {
return r.ExitCode == 0 && r.Signal == 0 && r.Err == nil
}
func (r *WaitResult) String() string {
return fmt.Sprintf("Wait returned exit code %v, signal %v, and error %v",
r.ExitCode, r.Signal, r.Err)
}
// RecoverableError wraps an error and marks whether it is recoverable and could
// be retried or it is fatal.
type RecoverableError struct {
Err error
Recoverable bool
}
// NewRecoverableError is used to wrap an error and mark it as recoverable or
// not.
func NewRecoverableError(e error, recoverable bool) *RecoverableError {
return &RecoverableError{
Err: e,
Recoverable: recoverable,
}
}
func (r *RecoverableError) Error() string {
return r.Err.Error()
}
// CheckResult encapsulates the result of a check
type CheckResult struct {
// ExitCode is the exit code of the check
ExitCode int
// Output is the output of the check script
Output string
// Timestamp is the time at which the check was executed
Timestamp time.Time
// Duration is the time it took the check to run
Duration time.Duration
// Err is the error that a check returned
Err error
}
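A quick sketch of how WaitResult reads at its call sites, assuming it runs inside this structs package; the values are hypothetical (exit code 137 is the conventional 128+9 for a SIGKILL-ed process):

package structs

import "fmt"

func ExampleWaitResult() {
    // A process that exited cleanly.
    ok := NewWaitResult(0, 0, nil)
    fmt.Println(ok.Successful()) // true

    // A process killed by signal 9.
    killed := NewWaitResult(137, 9, nil)
    fmt.Println(killed.Successful()) // false
    fmt.Println(killed)
    // Wait returned exit code 137, signal 9, and error <nil>
}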

View File

@ -0,0 +1,12 @@
// +build darwin dragonfly freebsd netbsd openbsd solaris windows
package structs
// IsolationConfig has information about the isolation mechanism the executor
// uses to put resource constraints and isolation on the user process. The
// default implementation is empty. Platforms that support resource isolation
// (e.g. Linux's Cgroups) should build their own platform-specific copy. This
// information is transmitted via RPC so it is not permissible to change the
// API.
type IsolationConfig struct {
}

View File

@ -0,0 +1,10 @@
package structs
import cgroupConfig "github.com/opencontainers/runc/libcontainer/configs"
// IsolationConfig has information about the isolation mechanism the executor
// uses to put resource constraints and isolation on the user process
type IsolationConfig struct {
Cgroup *cgroupConfig.Cgroup
CgroupPaths map[string]string
}

View File

@ -0,0 +1,69 @@
package driver
import (
"log"
"net/rpc"
"github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/driver/logging"
"github.com/hashicorp/nomad/nomad/structs"
)
type SyslogCollectorRPC struct {
client *rpc.Client
}
type LaunchCollectorArgs struct {
Ctx *logging.LogCollectorContext
}
func (e *SyslogCollectorRPC) LaunchCollector(ctx *logging.LogCollectorContext) (*logging.SyslogCollectorState, error) {
var ss *logging.SyslogCollectorState
err := e.client.Call("Plugin.LaunchCollector", LaunchCollectorArgs{Ctx: ctx}, &ss)
return ss, err
}
func (e *SyslogCollectorRPC) Exit() error {
return e.client.Call("Plugin.Exit", new(interface{}), new(interface{}))
}
func (e *SyslogCollectorRPC) UpdateLogConfig(logConfig *structs.LogConfig) error {
return e.client.Call("Plugin.UpdateLogConfig", logConfig, new(interface{}))
}
type SyslogCollectorRPCServer struct {
Impl logging.LogCollector
}
func (s *SyslogCollectorRPCServer) LaunchCollector(args LaunchCollectorArgs,
resp *logging.SyslogCollectorState) error {
ss, err := s.Impl.LaunchCollector(args.Ctx)
if ss != nil {
*resp = *ss
}
return err
}
func (s *SyslogCollectorRPCServer) Exit(args interface{}, resp *interface{}) error {
return s.Impl.Exit()
}
func (s *SyslogCollectorRPCServer) UpdateLogConfig(logConfig *structs.LogConfig, resp *interface{}) error {
return s.Impl.UpdateLogConfig(logConfig)
}
type SyslogCollectorPlugin struct {
logger *log.Logger
Impl *SyslogCollectorRPCServer
}
func (p *SyslogCollectorPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
if p.Impl == nil {
p.Impl = &SyslogCollectorRPCServer{Impl: logging.NewSyslogCollector(p.logger)}
}
return p.Impl, nil
}
func (p *SyslogCollectorPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
return &SyslogCollectorRPC{client: c}, nil
}
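For context, a hedged sketch of the serving side: the plugin binary (Nomad re-executing itself) would expose both plugins over go-plugin's RPC using the shared handshake, roughly as follows. This is an illustrative main, not the vendored launch code.

package main

import (
    "os"

    "github.com/hashicorp/go-plugin"
    "github.com/hashicorp/nomad/client/driver"
)

func main() {
    // Serve the "executor" and "syslogcollector" plugins registered in
    // GetPluginMap; the parent connects using the same HandshakeConfig.
    plugin.Serve(&plugin.ServeConfig{
        HandshakeConfig: driver.HandshakeConfig,
        Plugins:         driver.GetPluginMap(os.Stdout),
    })
}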

View File

@ -0,0 +1,170 @@
package driver
import (
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/executor"
"github.com/hashicorp/nomad/client/driver/logging"
cstructs "github.com/hashicorp/nomad/client/driver/structs"
"github.com/hashicorp/nomad/nomad/structs"
)
// createExecutor launches an executor plugin and returns an instance of the
// Executor interface
func createExecutor(config *plugin.ClientConfig, w io.Writer,
clientConfig *config.Config) (executor.Executor, *plugin.Client, error) {
config.HandshakeConfig = HandshakeConfig
config.Plugins = GetPluginMap(w)
config.MaxPort = clientConfig.ClientMaxPort
config.MinPort = clientConfig.ClientMinPort
// Set setsid on the plugin process so that it doesn't receive signals sent
// to the nomad client.
if config.Cmd != nil {
isolateCommand(config.Cmd)
}
executorClient := plugin.NewClient(config)
rpcClient, err := executorClient.Client()
if err != nil {
return nil, nil, fmt.Errorf("error creating rpc client for executor plugin: %v", err)
}
raw, err := rpcClient.Dispense("executor")
if err != nil {
return nil, nil, fmt.Errorf("unable to dispense the executor plugin: %v", err)
}
executorPlugin := raw.(executor.Executor)
return executorPlugin, executorClient, nil
}
func createLogCollector(config *plugin.ClientConfig, w io.Writer,
clientConfig *config.Config) (logging.LogCollector, *plugin.Client, error) {
config.HandshakeConfig = HandshakeConfig
config.Plugins = GetPluginMap(w)
config.MaxPort = clientConfig.ClientMaxPort
config.MinPort = clientConfig.ClientMinPort
if config.Cmd != nil {
isolateCommand(config.Cmd)
}
syslogClient := plugin.NewClient(config)
rpcClient, err := syslogClient.Client()
if err != nil {
return nil, nil, fmt.Errorf("error creating rpc client for syslog plugin: %v", err)
}
raw, err := rpcClient.Dispense("syslogcollector")
if err != nil {
return nil, nil, fmt.Errorf("unable to dispense the syslog plugin: %v", err)
}
logCollector := raw.(logging.LogCollector)
return logCollector, syslogClient, nil
}
func consulContext(clientConfig *config.Config, containerID string) *executor.ConsulContext {
return &executor.ConsulContext{
ConsulConfig: clientConfig.ConsulConfig,
ContainerID: containerID,
DockerEndpoint: clientConfig.Read("docker.endpoint"),
TLSCa: clientConfig.Read("docker.tls.ca"),
TLSCert: clientConfig.Read("docker.tls.cert"),
TLSKey: clientConfig.Read("docker.tls.key"),
}
}
// killProcess kills a process with the given pid
func killProcess(pid int) error {
proc, err := os.FindProcess(pid)
if err != nil {
return err
}
return proc.Kill()
}
// destroyPlugin kills the plugin with the given pid and also kills the user
// process
func destroyPlugin(pluginPid int, userPid int) error {
var merr error
if err := killProcess(pluginPid); err != nil {
merr = multierror.Append(merr, err)
}
if err := killProcess(userPid); err != nil {
merr = multierror.Append(merr, err)
}
return merr
}
// validateCommand validates that the command only has a single value and
// returns a user-friendly error message telling the user to use the passed
// argField.
func validateCommand(command, argField string) error {
trimmed := strings.TrimSpace(command)
if len(trimmed) == 0 {
return fmt.Errorf("command empty: %q", command)
}
if len(trimmed) != len(command) {
return fmt.Errorf("command contains extra white space: %q", command)
}
split := strings.Split(trimmed, " ")
if len(split) != 1 {
return fmt.Errorf("command contained more than one input. Use %q field to pass arguments", argField)
}
return nil
}
// GetKillTimeout returns the kill timeout to use given the tasks desired kill
// timeout and the operator configured max kill timeout.
func GetKillTimeout(desired, max time.Duration) time.Duration {
maxNanos := max.Nanoseconds()
desiredNanos := desired.Nanoseconds()
// Make the minimum time between signal and kill 1 second.
if desiredNanos <= 0 {
desiredNanos = (1 * time.Second).Nanoseconds()
}
// Protect against max not being set properly.
if maxNanos <= 0 {
maxNanos = (10 * time.Second).Nanoseconds()
}
if desiredNanos < maxNanos {
return time.Duration(desiredNanos)
}
return max
}
// GetAbsolutePath returns the absolute path of the passed binary by resolving
// it in the PATH and following symlinks.
func GetAbsolutePath(bin string) (string, error) {
lp, err := exec.LookPath(bin)
if err != nil {
return "", fmt.Errorf("failed to resolve path to %q executable: %v", bin, err)
}
return filepath.EvalSymlinks(lp)
}
// getExecutorUser returns the user of the task, defaulting to
// cstructs.DefaultUnpriviledgedUser if none was given.
func getExecutorUser(task *structs.Task) string {
if task.User == "" {
return cstructs.DefaultUnpriviledgedUser
}
return task.User
}
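GetKillTimeout's clamping is easiest to verify with a few concrete values; a standalone sketch, assuming it is called from within this driver package:

package driver

import (
    "fmt"
    "time"
)

func ExampleGetKillTimeout() {
    // A desired timeout under the operator maximum is honored as-is.
    fmt.Println(GetKillTimeout(5*time.Second, 30*time.Second)) // 5s

    // An unset (zero) desired timeout is raised to the 1s floor.
    fmt.Println(GetKillTimeout(0, 30*time.Second)) // 1s

    // A desired timeout over the maximum is clamped to the maximum.
    fmt.Println(GetKillTimeout(2*time.Minute, 30*time.Second)) // 30s
}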

View File

@ -0,0 +1,18 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package driver
import (
"os/exec"
"syscall"
)
// isolateCommand sets the setsid flag in exec.Cmd to true so that the process
// becomes the process leader in a new session and doesn't receive signals that
// are sent to the parent process.
func isolateCommand(cmd *exec.Cmd) {
if cmd.SysProcAttr == nil {
cmd.SysProcAttr = &syscall.SysProcAttr{}
}
cmd.SysProcAttr.Setsid = true
}
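A short usage sketch, assuming the same driver package and a Unix build: after isolateCommand, a Ctrl-C delivered to the client's process group no longer reaches the child, because the child leads its own session. The "sleep" command stands in for a real plugin binary.

package driver

import (
    "log"
    "os/exec"
)

func demoIsolateCommand() {
    cmd := exec.Command("sleep", "30")

    // Place the child in its own session (and hence its own process
    // group) before starting it.
    isolateCommand(cmd)

    if err := cmd.Start(); err != nil {
        log.Fatalf("start failed: %v", err)
    }
    log.Printf("started pid %d in its own session", cmd.Process.Pid)
    cmd.Wait()
}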

View File

@ -0,0 +1,9 @@
package driver
import (
"os/exec"
)
// TODO: Figure out if this is needed on Windows
func isolateCommand(cmd *exec.Cmd) {
}

View File

@ -0,0 +1,26 @@
package fingerprint
import (
"log"
"runtime"
client "github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/nomad/structs"
)
// ArchFingerprint is used to fingerprint the architecture
type ArchFingerprint struct {
StaticFingerprinter
logger *log.Logger
}
// NewArchFingerprint is used to create an architecture fingerprint
func NewArchFingerprint(logger *log.Logger) Fingerprint {
f := &ArchFingerprint{logger: logger}
return f
}
func (f *ArchFingerprint) Fingerprint(config *client.Config, node *structs.Node) (bool, error) {
node.Attributes["arch"] = runtime.GOARCH
return true, nil
}

View File

@ -0,0 +1,59 @@
// +build linux
package fingerprint
import (
"log"
"time"
"github.com/hashicorp/nomad/nomad/structs"
)
const (
cgroupAvailable = "available"
cgroupUnavailable = "unavailable"
interval = 15
)
type CGroupFingerprint struct {
logger *log.Logger
lastState string
mountPointDetector MountPointDetector
}
// MountPointDetector isolates calls to the cgroup library. This facilitates
// testing, where we can implement fake mount points to exercise various
// code paths
type MountPointDetector interface {
MountPoint() (string, error)
}
// DefaultMountPointDetector implements MountPointDetector by calling the cgroups library directly
type DefaultMountPointDetector struct {
}
// MountPoint calls out to the default cgroup library
func (b *DefaultMountPointDetector) MountPoint() (string, error) {
return FindCgroupMountpointDir()
}
// NewCGroupFingerprint returns a new cgroup fingerprinter
func NewCGroupFingerprint(logger *log.Logger) Fingerprint {
f := &CGroupFingerprint{
logger: logger,
lastState: cgroupUnavailable,
mountPointDetector: &DefaultMountPointDetector{},
}
return f
}
// clearCGroupAttributes clears any node attributes related to cgroups that might
// have been set in a previous fingerprint run.
func (f *CGroupFingerprint) clearCGroupAttributes(n *structs.Node) {
delete(n.Attributes, "unique.cgroup.mountpoint")
}
// Periodic determines the interval at which the periodic fingerprinter will run.
func (f *CGroupFingerprint) Periodic() (bool, time.Duration) {
return true, interval * time.Second
}

View File

@ -0,0 +1,57 @@
// +build linux
package fingerprint
import (
"fmt"
client "github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
// FindCgroupMountpointDir is used to find the cgroup mount point on a Linux
// system.
func FindCgroupMountpointDir() (string, error) {
mount, err := cgroups.FindCgroupMountpointDir()
if err != nil {
switch e := err.(type) {
case *cgroups.NotFoundError:
// It's okay if the mount point is not discovered
return "", nil
default:
// All other errors are passed back as is
return "", e
}
}
return mount, nil
}
// Fingerprint tries to find a valid cgroup mount point
func (f *CGroupFingerprint) Fingerprint(cfg *client.Config, node *structs.Node) (bool, error) {
mount, err := f.mountPointDetector.MountPoint()
if err != nil {
f.clearCGroupAttributes(node)
return false, fmt.Errorf("Failed to discover cgroup mount point: %s", err)
}
// Check if a cgroup mount point was found
if mount == "" {
// Clear any attributes from the previous fingerprint.
f.clearCGroupAttributes(node)
if f.lastState == cgroupAvailable {
f.logger.Printf("[INFO] fingerprint.cgroups: cgroups are unavailable")
}
f.lastState = cgroupUnavailable
return true, nil
}
node.Attributes["unique.cgroup.mountpoint"] = mount
if f.lastState == cgroupUnavailable {
f.logger.Printf("[INFO] fingerprint.cgroups: cgroups are available")
}
f.lastState = cgroupAvailable
return true, nil
}

View File

@ -0,0 +1,100 @@
package fingerprint
import (
"fmt"
"log"
"strconv"
"time"
consul "github.com/hashicorp/consul/api"
client "github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/nomad/structs"
)
const (
consulAvailable = "available"
consulUnavailable = "unavailable"
)
// ConsulFingerprint is used to fingerprint the presence of a Consul agent
type ConsulFingerprint struct {
logger *log.Logger
client *consul.Client
lastState string
}
// NewConsulFingerprint is used to create a Consul fingerprint
func NewConsulFingerprint(logger *log.Logger) Fingerprint {
return &ConsulFingerprint{logger: logger, lastState: consulUnavailable}
}
func (f *ConsulFingerprint) Fingerprint(config *client.Config, node *structs.Node) (bool, error) {
// Guard against uninitialized Links
if node.Links == nil {
node.Links = map[string]string{}
}
// Only create the client once to avoid creating too many connections to
// Consul.
if f.client == nil {
consulConfig, err := config.ConsulConfig.ApiConfig()
if err != nil {
return false, fmt.Errorf("Failed to initialize the Consul client config: %v", err)
}
f.client, err = consul.NewClient(consulConfig)
if err != nil {
return false, fmt.Errorf("Failed to initialize consul client: %s", err)
}
}
// We'll try to detect consul by making a query to the agent's self API.
// If we can't hit this URL consul is probably not running on this machine.
info, err := f.client.Agent().Self()
if err != nil {
// Clear any attributes set by a previous fingerprint.
f.clearConsulAttributes(node)
// Print a message indicating that the Consul Agent is not available
// anymore
if f.lastState == consulAvailable {
f.logger.Printf("[INFO] fingerprint.consul: consul agent is unavailable")
}
f.lastState = consulUnavailable
return false, nil
}
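// Self returns the agent's configuration as nested maps of interface{}
// values, which is why the assignments below need type assertions; e.g.
// (hypothetical value) info["Config"]["Version"] might be "0.7.0".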
node.Attributes["consul.server"] = strconv.FormatBool(info["Config"]["Server"].(bool))
node.Attributes["consul.version"] = info["Config"]["Version"].(string)
node.Attributes["consul.revision"] = info["Config"]["Revision"].(string)
node.Attributes["unique.consul.name"] = info["Config"]["NodeName"].(string)
node.Attributes["consul.datacenter"] = info["Config"]["Datacenter"].(string)
node.Links["consul"] = fmt.Sprintf("%s.%s",
node.Attributes["consul.datacenter"],
node.Attributes["unique.consul.name"])
// If the Consul Agent was previously unavailable print a message to
// indicate the Agent is available now
if f.lastState == consulUnavailable {
f.logger.Printf("[INFO] fingerprint.consul: consul agent is available")
}
f.lastState = consulAvailable
return true, nil
}
// clearConsulAttributes removes consul attributes and links from the passed
// Node.
func (f *ConsulFingerprint) clearConsulAttributes(n *structs.Node) {
delete(n.Attributes, "consul.server")
delete(n.Attributes, "consul.version")
delete(n.Attributes, "consul.revision")
delete(n.Attributes, "unique.consul.name")
delete(n.Attributes, "consul.datacenter")
delete(n.Links, "consul")
}
func (f *ConsulFingerprint) Periodic() (bool, time.Duration) {
return true, 15 * time.Second
}

View File

@ -0,0 +1,52 @@
package fingerprint
import (
"fmt"
"log"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/helper/stats"
"github.com/hashicorp/nomad/nomad/structs"
)
// CPUFingerprint is used to fingerprint the CPU
type CPUFingerprint struct {
StaticFingerprinter
logger *log.Logger
}
// NewCPUFingerprint is used to create a CPU fingerprint
func NewCPUFingerprint(logger *log.Logger) Fingerprint {
f := &CPUFingerprint{logger: logger}
return f
}
func (f *CPUFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
if err := stats.Init(); err != nil {
return false, fmt.Errorf("Unable to obtain CPU information: %v", err)
}
modelName := stats.CPUModelName()
if modelName != "" {
node.Attributes["cpu.modelname"] = modelName
}
mhz := stats.CPUMHzPerCore()
node.Attributes["cpu.frequency"] = fmt.Sprintf("%.0f", mhz)
f.logger.Printf("[DEBUG] fingerprint.cpu: frequency: %.0f MHz", mhz)
numCores := stats.CPUNumCores()
node.Attributes["cpu.numcores"] = fmt.Sprintf("%d", numCores)
f.logger.Printf("[DEBUG] fingerprint.cpu: core count: %d", numCores)
tt := stats.TotalTicksAvailable()
node.Attributes["cpu.totalcompute"] = fmt.Sprintf("%.0f", tt)
if node.Resources == nil {
node.Resources = &structs.Resources{}
}
node.Resources.CPU = int(tt)
return true, nil
}
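
A minimal usage sketch, not part of the diff. It assumes stats.TotalTicksAvailable reports roughly core count times per-core MHz, so a 4-core 2.4 GHz machine would yield about 9600 for both cpu.totalcompute and Resources.CPU.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/client/fingerprint"
	"github.com/hashicorp/nomad/nomad/structs"
)

func main() {
	fp := fingerprint.NewCPUFingerprint(log.New(os.Stderr, "", log.LstdFlags))
	node := &structs.Node{Attributes: map[string]string{}}
	if _, err := fp.Fingerprint(&config.Config{}, node); err != nil {
		log.Fatal(err)
	}
	// On a 4-core 2.4 GHz machine this prints roughly "9600 9600".
	fmt.Println(node.Attributes["cpu.totalcompute"], node.Resources.CPU)
}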

View File

@ -0,0 +1,250 @@
package fingerprint
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"regexp"
"strings"
"time"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/nomad/structs"
)
// This is where the AWS metadata server normally resides. We hardcode the
// "latest/meta-data" path as well since it's the only one we access here.
const DEFAULT_AWS_URL = "http://169.254.169.254/latest/meta-data/"
// map of instance type to approximate speed, in Mbits/s
// http://serverfault.com/questions/324883/aws-bandwidth-and-content-delivery/326797#326797
// which itself cites these sources:
// - http://blog.rightscale.com/2007/10/28/network-performance-within-amazon-ec2-and-to-amazon-s3/
// - http://www.soc.napier.ac.uk/~bill/chris_p.pdf
//
// This data is meant as a loose approximation
var ec2InstanceSpeedMap = map[string]int{
"m4.large": 80,
"m3.medium": 80,
"m3.large": 80,
"c4.large": 80,
"c3.large": 80,
"c3.xlarge": 80,
"r3.large": 80,
"r3.xlarge": 80,
"i2.xlarge": 80,
"d2.xlarge": 80,
"t2.micro": 16,
"t2.small": 16,
"t2.medium": 16,
"t2.large": 16,
"m4.xlarge": 760,
"m4.2xlarge": 760,
"m4.4xlarge": 760,
"m3.xlarge": 760,
"m3.2xlarge": 760,
"c4.xlarge": 760,
"c4.2xlarge": 760,
"c4.4xlarge": 760,
"c3.2xlarge": 760,
"c3.4xlarge": 760,
"g2.2xlarge": 760,
"r3.2xlarge": 760,
"r3.4xlarge": 760,
"i2.2xlarge": 760,
"i2.4xlarge": 760,
"d2.2xlarge": 760,
"d2.4xlarge": 760,
"m4.10xlarge": 10000,
"c4.8xlarge": 10000,
"c3.8xlarge": 10000,
"g2.8xlarge": 10000,
"r3.8xlarge": 10000,
"i2.8xlarge": 10000,
"d2.8xlarge": 10000,
}
// EnvAWSFingerprint is used to fingerprint AWS metadata
type EnvAWSFingerprint struct {
StaticFingerprinter
logger *log.Logger
}
// NewEnvAWSFingerprint is used to create a fingerprint from AWS metadata
func NewEnvAWSFingerprint(logger *log.Logger) Fingerprint {
f := &EnvAWSFingerprint{logger: logger}
return f
}
func (f *EnvAWSFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
if !f.isAWS() {
return false, nil
}
// newNetwork is populated and added to the Node's resources
newNetwork := &structs.NetworkResource{
Device: "eth0",
}
if node.Links == nil {
node.Links = make(map[string]string)
}
metadataURL := os.Getenv("AWS_ENV_URL")
if metadataURL == "" {
metadataURL = DEFAULT_AWS_URL
}
// assume 2 seconds is enough time inside the AWS network
client := &http.Client{
Timeout: 2 * time.Second,
Transport: cleanhttp.DefaultTransport(),
}
// Keys and whether they should be namespaced as unique. Any key whose value
// uniquely identifies a node, such as ip, should be marked as unique. When
// marked as unique, the key isn't included in the computed node class.
keys := map[string]bool{
"ami-id": true,
"hostname": true,
"instance-id": true,
"instance-type": false,
"local-hostname": true,
"local-ipv4": true,
"public-hostname": true,
"public-ipv4": true,
"placement/availability-zone": false,
}
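// For example (hypothetical values): "instance-type" is not unique, so it
// is stored as "platform.aws.instance-type" and feeds the computed node
// class, while "local-ipv4" uniquely identifies the node and is stored as
// "unique.platform.aws.local-ipv4".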
for k, unique := range keys {
res, err := client.Get(metadataURL + k)
// Check the error before touching the response; res is nil on failure.
if err != nil {
// if it's a URL error, assume we're not in an AWS environment
// TODO: better way to detect AWS? Check xen virtualization?
if _, ok := err.(*url.Error); ok {
return false, nil
}
// not sure what other errors it would return
return false, err
}
if res.StatusCode != http.StatusOK {
f.logger.Printf("[WARN] fingerprint.env_aws: Could not read value for attribute %q", k)
continue
}
resp, err := ioutil.ReadAll(res.Body)
res.Body.Close()
if err != nil {
f.logger.Printf("[ERR] fingerprint.env_aws: Error reading response body for AWS %s", k)
continue
}
// assume we want blank entries
key := "platform.aws." + strings.Replace(k, "/", ".", -1)
if unique {
key = structs.UniqueNamespace(key)
}
node.Attributes[key] = strings.Trim(string(resp), "\n")
}
// copy over network specific information
if val := node.Attributes["unique.platform.aws.local-ipv4"]; val != "" {
node.Attributes["unique.network.ip-address"] = val
newNetwork.IP = val
newNetwork.CIDR = newNetwork.IP + "/32"
}
// find LinkSpeed from lookup
if throughput := f.linkSpeed(); throughput > 0 {
newNetwork.MBits = throughput
}
if node.Resources == nil {
node.Resources = &structs.Resources{}
}
node.Resources.Networks = append(node.Resources.Networks, newNetwork)
// populate Node Network Resources
// populate Links
node.Links["aws.ec2"] = fmt.Sprintf("%s.%s",
node.Attributes["platform.aws.placement.availability-zone"],
node.Attributes["unique.platform.aws.instance-id"])
return true, nil
}
func (f *EnvAWSFingerprint) isAWS() bool {
// Read the internal metadata URL from the environment, allowing test files to
// provide their own
metadataURL := os.Getenv("AWS_ENV_URL")
if metadataURL == "" {
metadataURL = DEFAULT_AWS_URL
}
// assume 2 seconds is enough time inside the AWS network
client := &http.Client{
Timeout: 2 * time.Second,
Transport: cleanhttp.DefaultTransport(),
}
// Query the metadata URL for the ami-id, to verify we're on AWS
resp, err := client.Get(metadataURL + "ami-id")
if err != nil {
f.logger.Printf("[DEBUG] fingerprint.env_aws: Error querying AWS Metadata URL, skipping")
return false
}
defer resp.Body.Close()
if resp.StatusCode >= 400 {
// URL not found, which indicates that this isn't AWS
return false
}
amiID, err := ioutil.ReadAll(resp.Body)
if err != nil {
f.logger.Printf("[DEBUG] fingerprint.env_aws: Error reading AWS AMI ID, skipping")
return false
}
// Anchor the pattern so only a genuine "ami-..." prefix counts.
match, err := regexp.MatchString("^ami-", string(amiID))
if err != nil || !match {
return false
}
return true
}
// linkSpeed uses a lookup table to approximate network speeds
func (f *EnvAWSFingerprint) linkSpeed() int {
// Query the API for the instance type, and use the table above to approximate
// the network speed
metadataURL := os.Getenv("AWS_ENV_URL")
if metadataURL == "" {
metadataURL = DEFAULT_AWS_URL
}
// assume 2 seconds is enough time inside the AWS network
client := &http.Client{
Timeout: 2 * time.Second,
Transport: cleanhttp.DefaultTransport(),
}
res, err := client.Get(metadataURL + "instance-type")
if err != nil {
f.logger.Printf("[ERR] fingerprint.env_aws: Error querying instance-type: %v", err)
return 0
}
body, err := ioutil.ReadAll(res.Body)
res.Body.Close()
if err != nil {
f.logger.Printf("[ERR] fingerprint.env_aws: Error reading response body for instance-type")
return 0
}
key := strings.Trim(string(body), "\n")
v, ok := ec2InstanceSpeedMap[key]
if !ok {
return 0
}
return v
}
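
Because the fingerprinter consults AWS_ENV_URL before falling back to the hardcoded metadata address, it can be exercised without EC2 access. A minimal test sketch, not part of the diff, assuming only the standard library and the exported names above:

package fingerprint

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"
	"os"
	"testing"

	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/nomad/structs"
)

func TestEnvAWSFingerprint_fakeMetadata(t *testing.T) {
	// Serve canned metadata responses in place of 169.254.169.254.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/ami-id":
			fmt.Fprint(w, "ami-1234abcd")
		case "/instance-type":
			fmt.Fprint(w, "m4.large")
		default:
			http.NotFound(w, r) // unknown keys are logged and skipped
		}
	}))
	defer ts.Close()

	os.Setenv("AWS_ENV_URL", ts.URL+"/")
	defer os.Unsetenv("AWS_ENV_URL")

	f := NewEnvAWSFingerprint(log.New(os.Stderr, "", log.LstdFlags))
	node := &structs.Node{Attributes: map[string]string{}}
	if ok, err := f.Fingerprint(&config.Config{}, node); err != nil || !ok {
		t.Fatalf("expected AWS detection, got ok=%v err=%v", ok, err)
	}
}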

View File

@ -0,0 +1,270 @@
package fingerprint
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/nomad/structs"
)
// This is where the GCE metadata server normally resides. We hardcode the
// "instance" path as well since it's the only one we access here.
const DEFAULT_GCE_URL = "http://169.254.169.254/computeMetadata/v1/instance/"
type GCEMetadataNetworkInterface struct {
AccessConfigs []struct {
ExternalIp string
Type string
}
ForwardedIps []string
Ip string
Network string
}
type ReqError struct {
StatusCode int
}
func (e ReqError) Error() string {
return http.StatusText(e.StatusCode)
}
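// lastToken returns the final slash-delimited segment of s; e.g. a
// (hypothetical) "projects/123/machineTypes/n1-standard-1" yields
// "n1-standard-1".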
func lastToken(s string) string {
index := strings.LastIndex(s, "/")
return s[index+1:]
}
// EnvGCEFingerprint is used to fingerprint GCE metadata
type EnvGCEFingerprint struct {
StaticFingerprinter
client *http.Client
logger *log.Logger
metadataURL string
}
// NewEnvGCEFingerprint is used to create a fingerprint from GCE metadata
func NewEnvGCEFingerprint(logger *log.Logger) Fingerprint {
// Read the internal metadata URL from the environment, allowing test files to
// provide their own
metadataURL := os.Getenv("GCE_ENV_URL")
if metadataURL == "" {
metadataURL = DEFAULT_GCE_URL
}
// assume 2 seconds is enough time inside the GCE network
client := &http.Client{
Timeout: 2 * time.Second,
Transport: cleanhttp.DefaultTransport(),
}
return &EnvGCEFingerprint{
client: client,
logger: logger,
metadataURL: metadataURL,
}
}
func (f *EnvGCEFingerprint) Get(attribute string, recursive bool) (string, error) {
reqUrl := f.metadataURL + attribute
if recursive {
reqUrl = reqUrl + "?recursive=true"
}
parsedUrl, err := url.Parse(reqUrl)
if err != nil {
return "", err
}
req := &http.Request{
Method: "GET",
URL: parsedUrl,
Header: http.Header{
"Metadata-Flavor": []string{"Google"},
},
}
res, err := f.client.Do(req)
if err != nil {
f.logger.Printf("[DEBUG] fingerprint.env_gce: Could not read value for attribute %q", attribute)
return "", err
}
defer res.Body.Close()
// Surface non-2xx responses as ReqError so callers such as isGCE can
// distinguish a 404 from other failures.
if res.StatusCode != http.StatusOK {
f.logger.Printf("[DEBUG] fingerprint.env_gce: Could not read value for attribute %q", attribute)
return "", ReqError{res.StatusCode}
}
resp, err := ioutil.ReadAll(res.Body)
if err != nil {
f.logger.Printf("[ERR] fingerprint.env_gce: Error reading response body for GCE %s", attribute)
return "", err
}
return string(resp), nil
}
func checkError(err error, logger *log.Logger, desc string) error {
// If it's a URL error, assume we're not actually in a GCE environment.
// To the outer layers, this isn't an error so return nil.
if _, ok := err.(*url.Error); ok {
logger.Printf("[DEBUG] fingerprint.env_gce: Error querying GCE " + desc + ", skipping")
return nil
}
// Otherwise pass the error through.
return err
}
func (f *EnvGCEFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
if !f.isGCE() {
return false, nil
}
if node.Links == nil {
node.Links = make(map[string]string)
}
// Keys and whether they should be namespaced as unique. Any key whose value
// uniquely identifies a node, such as ip, should be marked as unique. When
// marked as unique, the key isn't included in the computed node class.
keys := map[string]bool{
"hostname": true,
"id": true,
"cpu-platform": false,
"scheduling/automatic-restart": false,
"scheduling/on-host-maintenance": false,
}
for k, unique := range keys {
value, err := f.Get(k, false)
if err != nil {
return false, checkError(err, f.logger, k)
}
// assume we want blank entries
key := "platform.gce." + strings.Replace(k, "/", ".", -1)
if unique {
key = structs.UniqueNamespace(key)
}
node.Attributes[key] = strings.Trim(string(value), "\n")
}
// These keys need everything before the final slash removed to be usable.
keys = map[string]bool{
"machine-type": false,
"zone": false,
}
for k, unique := range keys {
value, err := f.Get(k, false)
if err != nil {
return false, checkError(err, f.logger, k)
}
key := "platform.gce." + k
if unique {
key = structs.UniqueNamespace(key)
}
node.Attributes[key] = strings.Trim(lastToken(value), "\n")
}
// Get internal and external IPs (if they exist)
value, err := f.Get("network-interfaces/", true)
if err != nil {
return false, checkError(err, f.logger, "network-interfaces/")
}
var interfaces []GCEMetadataNetworkInterface
if err := json.Unmarshal([]byte(value), &interfaces); err != nil {
f.logger.Printf("[WARN] fingerprint.env_gce: Error decoding network interface information: %s", err.Error())
}
for _, intf := range interfaces {
prefix := "platform.gce.network." + lastToken(intf.Network)
uniquePrefix := "unique." + prefix
node.Attributes[prefix] = "true"
node.Attributes[uniquePrefix+".ip"] = strings.Trim(intf.Ip, "\n")
for index, accessConfig := range intf.AccessConfigs {
node.Attributes[uniquePrefix+".external-ip."+strconv.Itoa(index)] = accessConfig.ExternalIp
}
}
var tagList []string
value, err = f.Get("tags", false)
if err != nil {
return false, checkError(err, f.logger, "tags")
}
if err := json.Unmarshal([]byte(value), &tagList); err != nil {
f.logger.Printf("[WARN] fingerprint.env_gce: Error decoding instance tags: %s", err.Error())
}
for _, tag := range tagList {
attr := "platform.gce.tag."
var key string
// If the tag is namespaced as unique, we strip it from the tag and
// prepend to the whole attribute.
if structs.IsUniqueNamespace(tag) {
tag = strings.TrimPrefix(tag, structs.NodeUniqueNamespace)
key = fmt.Sprintf("%s%s%s", structs.NodeUniqueNamespace, attr, tag)
} else {
key = fmt.Sprintf("%s%s", attr, tag)
}
node.Attributes[key] = "true"
}
var attrDict map[string]string
value, err = f.Get("attributes/", true)
if err != nil {
return false, checkError(err, f.logger, "attributes/")
}
if err := json.Unmarshal([]byte(value), &attrDict); err != nil {
f.logger.Printf("[WARN] fingerprint.env_gce: Error decoding instance attributes: %s", err.Error())
}
for k, v := range attrDict {
attr := "platform.gce.attr."
var key string
// If the key is namespaced as unique, we strip it from the
// key and prepend to the whole attribute.
if structs.IsUniqueNamespace(k) {
k = strings.TrimPrefix(k, structs.NodeUniqueNamespace)
key = fmt.Sprintf("%s%s%s", structs.NodeUniqueNamespace, attr, k)
} else {
key = fmt.Sprintf("%s%s", attr, k)
}
node.Attributes[key] = strings.Trim(v, "\n")
}
// populate Links
node.Links["gce"] = node.Attributes["unique.platform.gce.id"]
return true, nil
}
func (f *EnvGCEFingerprint) isGCE() bool {
// TODO: better way to detect GCE?
// Query the metadata url for the machine type, to verify we're on GCE
machineType, err := f.Get("machine-type", false)
if err != nil {
if re, ok := err.(ReqError); !ok || re.StatusCode != 404 {
// If it wasn't a 404 error, print an error message.
f.logger.Printf("[DEBUG] fingerprint.env_gce: Error querying GCE Metadata URL, skipping")
}
return false
}
match, err := regexp.MatchString("projects/.+/machineTypes/.+", machineType)
if err != nil || !match {
return false
}
return true
}
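
The GCE fingerprinter has the same test seam: GCE_ENV_URL points it at a stand-in server. A sketch, not part of the diff, that exercises only the isGCE probe:

package fingerprint

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"
	"os"
	"testing"
)

func TestEnvGCEFingerprint_fakeMetadata(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The real metadata service requires this header; mirror that here.
		if r.Header.Get("Metadata-Flavor") != "Google" {
			http.Error(w, "missing Metadata-Flavor", http.StatusForbidden)
			return
		}
		if r.URL.Path == "/machine-type" {
			fmt.Fprint(w, "projects/123456/machineTypes/n1-standard-1")
			return
		}
		http.NotFound(w, r)
	}))
	defer ts.Close()

	os.Setenv("GCE_ENV_URL", ts.URL+"/")
	defer os.Unsetenv("GCE_ENV_URL")

	f := NewEnvGCEFingerprint(log.New(os.Stderr, "", log.LstdFlags)).(*EnvGCEFingerprint)
	if !f.isGCE() {
		t.Fatal("expected the stand-in machine-type to be detected as GCE")
	}
}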

Some files were not shown because too many files have changed in this diff.