diff --git a/builtin/providers/nomad/provider.go b/builtin/providers/nomad/provider.go
new file mode 100644
index 000000000..2da83b689
--- /dev/null
+++ b/builtin/providers/nomad/provider.go
@@ -0,0 +1,49 @@
+package nomad
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/nomad/api"
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func Provider() terraform.ResourceProvider {
+ return &schema.Provider{
+ Schema: map[string]*schema.Schema{
+ "address": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ DefaultFunc: schema.EnvDefaultFunc("NOMAD_ADDR", nil),
+ Description: "URL of the root of the target Nomad agent.",
+ },
+
+ "region": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ DefaultFunc: schema.EnvDefaultFunc("NOMAD_REGION", ""),
+ Description: "Region of the target Nomad agent.",
+ },
+ },
+
+ ConfigureFunc: providerConfigure,
+
+ ResourcesMap: map[string]*schema.Resource{
+ "nomad_job": resourceJob(),
+ },
+ }
+}
+
+func providerConfigure(d *schema.ResourceData) (interface{}, error) {
+ config := &api.Config{
+ Address: d.Get("address").(string),
+ Region: d.Get("region").(string),
+ }
+
+ client, err := api.NewClient(config)
+ if err != nil {
+ return nil, fmt.Errorf("failed to configure Nomad API: %s", err)
+ }
+
+ return client, nil
+}
diff --git a/builtin/providers/nomad/provider_test.go b/builtin/providers/nomad/provider_test.go
new file mode 100644
index 000000000..edbf9abe0
--- /dev/null
+++ b/builtin/providers/nomad/provider_test.go
@@ -0,0 +1,44 @@
+package nomad
+
+import (
+ "os"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// How to run the acceptance tests for this provider:
+//
+// - Obtain an official Nomad release from https://nomadproject.io
+// and extract the "nomad" binary
+//
+// - Run the following to start the Nomad agent in development mode:
+// nomad agent -dev
+//
+// - Run the Terraform acceptance tests as usual:
+// make testacc TEST=./builtin/providers/nomad
+//
+// The tests expect to be run against a fresh, empty Nomad server.
+
+func TestProvider(t *testing.T) {
+ if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+var testProvider *schema.Provider
+var testProviders map[string]terraform.ResourceProvider
+
+func init() {
+ testProvider = Provider().(*schema.Provider)
+ testProviders = map[string]terraform.ResourceProvider{
+ "nomad": testProvider,
+ }
+}
+
+func testAccPreCheck(t *testing.T) {
+ if v := os.Getenv("NOMAD_ADDR"); v == "" {
+ os.Setenv("NOMAD_ADDR", "http://127.0.0.1:4646")
+ }
+}
diff --git a/builtin/providers/nomad/resource_job.go b/builtin/providers/nomad/resource_job.go
new file mode 100644
index 000000000..36998db7f
--- /dev/null
+++ b/builtin/providers/nomad/resource_job.go
@@ -0,0 +1,196 @@
+package nomad
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "log"
+ "reflect"
+ "strings"
+
+ "github.com/hashicorp/nomad/api"
+ "github.com/hashicorp/nomad/jobspec"
+ "github.com/hashicorp/nomad/nomad/structs"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceJob() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceJobRegister,
+ Update: resourceJobRegister,
+ Delete: resourceJobDeregister,
+ Read: resourceJobRead,
+ Exists: resourceJobExists,
+
+ Schema: map[string]*schema.Schema{
+ "jobspec": {
+ Description: "Job specification. If you want to point to a file, use the file() function.",
+ Required: true,
+ Type: schema.TypeString,
+ DiffSuppressFunc: jobspecDiffSuppress,
+ },
+
+ "deregister_on_destroy": {
+ Description: "If true, the job will be deregistered on destroy.",
+ Optional: true,
+ Default: true,
+ Type: schema.TypeBool,
+ },
+
+ "deregister_on_id_change": {
+ Description: "If true, the job will be deregistered when the job ID changes.",
+ Optional: true,
+ Default: true,
+ Type: schema.TypeBool,
+ },
+ },
+ }
+}
+
+func resourceJobRegister(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*api.Client)
+
+ // Get the jobspec itself
+ jobspecRaw := d.Get("jobspec").(string)
+
+ // Parse it
+ jobspecStruct, err := jobspec.Parse(strings.NewReader(jobspecRaw))
+ if err != nil {
+ return fmt.Errorf("error parsing jobspec: %s", err)
+ }
+
+ // Initialize and validate
+ jobspecStruct.Canonicalize()
+ if err := jobspecStruct.Validate(); err != nil {
+ return fmt.Errorf("error validating job: %v", err)
+ }
+
+ // If we have an ID and its not equal to this jobspec, then we
+ // have to deregister the old job before we register the new job.
+ prevId := d.Id()
+ if !d.Get("deregister_on_id_change").(bool) {
+ // If we aren't deregistering on ID change, just pretend we
+ // don't have a prior ID.
+ prevId = ""
+ }
+ if prevId != "" && prevId != jobspecStruct.ID {
+ log.Printf(
+ "[INFO] Deregistering %q before registering %q",
+ prevId, jobspecStruct.ID)
+
+ log.Printf("[DEBUG] Deregistering job: %q", prevId)
+ _, _, err := client.Jobs().Deregister(prevId, nil)
+ if err != nil {
+ return fmt.Errorf(
+ "error deregistering previous job %q "+
+ "before registering new job %q: %s",
+ prevId, jobspecStruct.ID, err)
+ }
+
+ // Success! Clear our state.
+ d.SetId("")
+ }
+
+ // Convert it so that we can use it with the API
+ jobspecAPI, err := convertStructJob(jobspecStruct)
+ if err != nil {
+ return fmt.Errorf("error converting jobspec: %s", err)
+ }
+
+ // Register the job
+ _, _, err = client.Jobs().Register(jobspecAPI, nil)
+ if err != nil {
+ return fmt.Errorf("error applying jobspec: %s", err)
+ }
+
+ d.SetId(jobspecAPI.ID)
+
+ return nil
+}
+
+func resourceJobDeregister(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*api.Client)
+
+ // If deregistration is disabled, then do nothing
+ if !d.Get("deregister_on_destroy").(bool) {
+ log.Printf(
+ "[WARN] Job %q will not deregister since 'deregister_on_destroy'"+
+ " is false", d.Id())
+ return nil
+ }
+
+ id := d.Id()
+ log.Printf("[DEBUG] Deregistering job: %q", id)
+ _, _, err := client.Jobs().Deregister(id, nil)
+ if err != nil {
+ return fmt.Errorf("error deregistering job: %s", err)
+ }
+
+ return nil
+}
+
+func resourceJobExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ client := meta.(*api.Client)
+
+ id := d.Id()
+ log.Printf("[DEBUG] Checking if job exists: %q", id)
+ _, _, err := client.Jobs().Info(id, nil)
+ if err != nil {
+ // As of Nomad 0.4.1, the API client returns an error for 404
+ // rather than a nil result, so we must check this way.
+ if strings.Contains(err.Error(), "404") {
+ return false, nil
+ }
+
+ return true, fmt.Errorf("error checking for job: %#v", err)
+ }
+
+ return true, nil
+}
+
+func resourceJobRead(d *schema.ResourceData, meta interface{}) error {
+ // We don't do anything at the moment. Exists is used to
+ // remove non-existent jobs but read doesn't have to do anything.
+ return nil
+}
+
+// convertStructJob is used to take a *structs.Job and convert it to an *api.Job.
+//
+// This is unfortunate but it is how Nomad itself does it (this is copied
+// line for line from Nomad). We'll mimic them exactly to get this done.
+func convertStructJob(in *structs.Job) (*api.Job, error) {
+ gob.Register([]map[string]interface{}{})
+ gob.Register([]interface{}{})
+ var apiJob *api.Job
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(in); err != nil {
+ return nil, err
+ }
+ if err := gob.NewDecoder(buf).Decode(&apiJob); err != nil {
+ return nil, err
+ }
+ return apiJob, nil
+}
+
+// jobspecDiffSuppress is the DiffSuppressFunc used by the schema to
+// check if two jobspecs are equal.
+func jobspecDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
+ // Parse the old job
+ oldJob, err := jobspec.Parse(strings.NewReader(old))
+ if err != nil {
+ return false
+ }
+
+ // Parse the new job
+ newJob, err := jobspec.Parse(strings.NewReader(new))
+ if err != nil {
+ return false
+ }
+
+ // Init
+ oldJob.Canonicalize()
+ newJob.Canonicalize()
+
+ // Check for jobspec equality
+ return reflect.DeepEqual(oldJob, newJob)
+}
diff --git a/builtin/providers/nomad/resource_job_test.go b/builtin/providers/nomad/resource_job_test.go
new file mode 100644
index 000000000..6562e2988
--- /dev/null
+++ b/builtin/providers/nomad/resource_job_test.go
@@ -0,0 +1,283 @@
+package nomad
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ r "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+
+ "github.com/hashicorp/nomad/api"
+)
+
+func TestResourceJob_basic(t *testing.T) {
+ r.Test(t, r.TestCase{
+ Providers: testProviders,
+ PreCheck: func() { testAccPreCheck(t) },
+ Steps: []r.TestStep{
+ r.TestStep{
+ Config: testResourceJob_initialConfig,
+ Check: testResourceJob_initialCheck,
+ },
+ },
+
+ CheckDestroy: testResourceJob_checkDestroy("foo"),
+ })
+}
+
+func TestResourceJob_refresh(t *testing.T) {
+ r.Test(t, r.TestCase{
+ Providers: testProviders,
+ PreCheck: func() { testAccPreCheck(t) },
+ Steps: []r.TestStep{
+ r.TestStep{
+ Config: testResourceJob_initialConfig,
+ Check: testResourceJob_initialCheck,
+ },
+
+ // This should successfully cause the job to be recreated,
+ // testing the Exists function.
+ r.TestStep{
+ PreConfig: testResourceJob_deregister(t, "foo"),
+ Config: testResourceJob_initialConfig,
+ },
+ },
+ })
+}
+
+func TestResourceJob_disableDestroyDeregister(t *testing.T) {
+ r.Test(t, r.TestCase{
+ Providers: testProviders,
+ PreCheck: func() { testAccPreCheck(t) },
+ Steps: []r.TestStep{
+ r.TestStep{
+ Config: testResourceJob_noDestroy,
+ Check: testResourceJob_initialCheck,
+ },
+
+ // Destroy with our setting set
+ r.TestStep{
+ Destroy: true,
+ Config: testResourceJob_noDestroy,
+ Check: testResourceJob_checkExists,
+ },
+
+ // Re-apply without the setting set
+ r.TestStep{
+ Config: testResourceJob_initialConfig,
+ Check: testResourceJob_checkExists,
+ },
+ },
+ })
+}
+
+func TestResourceJob_idChange(t *testing.T) {
+ r.Test(t, r.TestCase{
+ Providers: testProviders,
+ PreCheck: func() { testAccPreCheck(t) },
+ Steps: []r.TestStep{
+ r.TestStep{
+ Config: testResourceJob_initialConfig,
+ Check: testResourceJob_initialCheck,
+ },
+
+ // Change our ID
+ r.TestStep{
+ Config: testResourceJob_updateConfig,
+ Check: testResourceJob_updateCheck,
+ },
+ },
+ })
+}
+
+var testResourceJob_initialConfig = `
+resource "nomad_job" "test" {
+ jobspec = <
diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/README.md
+# Logrus [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Output to stderr instead of stdout, could also be a file.
+ log.SetOutput(os.Stderr)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "os"
+
+  "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different than the package level
+ // exported logger. See Godoc.
+ log.Out = os.Stderr
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field. However, you can still use the
+`printf`-family functions with Logrus.
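+
+As a rough illustration (the variable names below are made up), a
+format-string call like:
+
+```go
+log.Infof("Failed to send event %s to topic %s", event, topic)
+```
+
+can usually be rewritten so the values become fields rather than message text:
+
+```go
+log.WithFields(log.Fields{
+  "event": event,
+  "topic": topic,
+}).Info("Failed to send event")
+```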
+
+#### Hooks
+
+You can add hooks for logging levels. For example, you can send errors to an
+exception tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD,
+or log to multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+Note: the Syslog hook also supports connecting to the local syslog daemon (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
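+
+A custom hook is any type that implements the `Hook` interface: a `Levels() []Level`
+method listing the levels it should fire on, and a `Fire(*Entry) error` method that
+does the work. A minimal sketch (the hook type and its behavior below are purely
+illustrative, not part of Logrus):
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+// ErrorCounterHook counts how many Error-or-worse entries have been logged.
+// It is not goroutine-safe; it is only a sketch.
+type ErrorCounterHook struct {
+  Count int
+}
+
+// Levels restricts the hook to Error, Fatal and Panic entries.
+func (h *ErrorCounterHook) Levels() []log.Level {
+  return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
+}
+
+// Fire is called by Logrus once for every matching entry.
+func (h *ErrorCounterHook) Fire(entry *log.Entry) error {
+  h.Count++
+  return nil
+}
+
+func init() {
+  log.AddHook(&ErrorCounterHook{})
+}
+```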
+
+| Hook | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
+
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`; it will then only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
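+
+For example, a small sketch of wiring the level to a command-line flag (the
+`-verbose` flag name is just an example):
+
+```go
+package main
+
+import (
+  "flag"
+
+  log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+  verbose := flag.Bool("verbose", false, "enable debug logging")
+  flag.Parse()
+
+  if *verbose {
+    log.SetLevel(log.DebugLevel)
+  }
+
+  log.Debug("only shown when -verbose is set")
+}
+```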
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+ the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+ field to `true`. To force non-colored output even when there is a TTY, set the
+ `DisableColors` field to `true` (see the sketch after this list).
+* `logrus.JSONFormatter`. Logs fields as JSON.
+* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.
+
+ ```go
+ logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"})
+ ```
+
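+For example, a small sketch of configuring the built-in `TextFormatter` (the
+particular field choices below are just an example):
+
+```go
+log.SetFormatter(&log.TextFormatter{
+  // Always colorize, even when the output is not a TTY.
+  ForceColors: true,
+  // Print the full timestamp instead of the elapsed-seconds counter.
+  FullTimestamp: true,
+})
+```
+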
+Third party logging formatters:
+
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func init() {
+  log.SetFormatter(new(MyJSONFormatter))
+}
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+ // Note this doesn't include Time, Level and Message which are available on
+ // the Entry. Consult `godoc` on information about those fields or read the
+ // source of the official loggers.
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+ // create a stdlib log.Logger that writes to
+ // logrus.Logger.
+ ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can configure each logger's level, hook and formatter in a config file, and loggers are generated with different configurations for different environments.|
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+logger, hook := test.NewNullLogger()
+logger.Error("Hello error")
+
+assert.Equal(1, len(hook.Entries))
+assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+assert.Equal("Hello error", hook.LastEntry().Message)
+
+hook.Reset()
+assert.Nil(hook.LastEntry())
+```
diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 000000000..dddd5f877
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+ package main
+
+ import (
+ log "github.com/Sirupsen/logrus"
+ )
+
+ func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "number": 1,
+ "size": 10,
+ }).Info("A walrus appears")
+ }
+
+Output:
+ time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 000000000..89e966e7b
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,264 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// Returns a reader for the entry, which is a proxy to the formatter.
+func (entry *Entry) Reader() (*bytes.Buffer, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ return bytes.NewBuffer(serialized), err
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ reader, err := entry.Reader()
+ if err != nil {
+ return "", err
+ }
+
+ return reader.String(), err
+}
+
+// Add an error as a single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+ return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := make(Fields, len(entry.Data)+len(fields))
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ reader, err := entry.Reader()
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+
+ _, err = io.Copy(entry.Logger.Out, reader)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(&entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+ os.Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 000000000..9a0120ac1
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,193 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+ // std is the name of the standard logger in stdlib `log`
+ std = New()
+)
+
+func StandardLogger() *Logger {
+ return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+ return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 000000000..104d689f1
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,48 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to avoid silently overwriting the `time`, `msg` and `level` fields when
+// dumping an entry. If this code wasn't there, doing:
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code,
+// it'll be logged as:
+//
+// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+ _, ok := data["time"]
+ if ok {
+ data["fields.time"] = data["time"]
+ }
+
+ _, ok = data["msg"]
+ if ok {
+ data["fields.msg"] = data["msg"]
+ }
+
+ _, ok = data["level"]
+ if ok {
+ data["fields.level"] = data["level"]
+ }
+}
diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 000000000..3f151cdc3
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers; you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 000000000..2ad6dc5cf
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,41 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+3)
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/Sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+ prefixFieldClashes(data)
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+
+ data["time"] = entry.Time.Format(timestampFormat)
+ data["msg"] = entry.Message
+ data["level"] = entry.Level.String()
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 000000000..2fdb23176
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,212 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+ // file, or leave it at the default, which is `os.Stderr`. You can also set this to
+ // something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks LevelHooks
+ // All log entries pass through the formatter before being logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter` for which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but to a file it wouldn't. You can easily implement your
+ // own that implements the `Formatter` interface, see the `README` or included
+ // formatters for examples.
+ Formatter Formatter
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+ // logged. `logrus.Debug` is useful in development and for verbose debugging.
+ Level Level
+ // Used to sync writing to the log.
+ mu sync.Mutex
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(LevelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stderr,
+ Formatter: new(TextFormatter),
+ Hooks: make(LevelHooks),
+ Level: InfoLevel,
+ }
+}
+
+// Adds a field to the log entry; note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ return NewEntry(logger).WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ return NewEntry(logger).WithFields(fields)
+}
+
+// Add an error as a single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+ return NewEntry(logger).WithError(err)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debugf(format, args...)
+ }
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Infof(format, args...)
+ }
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ NewEntry(logger).Printf(format, args...)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnf(format, args...)
+ }
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnf(format, args...)
+ }
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Errorf(format, args...)
+ }
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatalf(format, args...)
+ }
+ os.Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panicf(format, args...)
+ }
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debug(args...)
+ }
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Info(args...)
+ }
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ NewEntry(logger).Info(args...)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warn(args...)
+ }
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warn(args...)
+ }
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Error(args...)
+ }
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatal(args...)
+ }
+ os.Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panic(args...)
+ }
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debugln(args...)
+ }
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Infoln(args...)
+ }
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ NewEntry(logger).Println(args...)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnln(args...)
+ }
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnln(args...)
+ }
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Errorln(args...)
+ }
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatalln(args...)
+ }
+ os.Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panicln(args...)
+ }
+}
diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 000000000..e59669111
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,143 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch strings.ToLower(lvl) {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// AllLevels is a slice exposing all logging levels
+var AllLevels = []Level{
+ PanicLevel,
+ FatalLevel,
+ ErrorLevel,
+ WarnLevel,
+ InfoLevel,
+ DebugLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+ _ StdLogger = &log.Logger{}
+ _ StdLogger = &Entry{}
+ _ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take; that way
+// it'll accept a stdlib logger and a logrus logger. There's no standard
+// interface; this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+ WithField(key string, value interface{}) *Entry
+ WithFields(fields Fields) *Entry
+ WithError(err error) *Entry
+
+ Debugf(format string, args ...interface{})
+ Infof(format string, args ...interface{})
+ Printf(format string, args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warningf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Panicf(format string, args ...interface{})
+
+ Debug(args ...interface{})
+ Info(args ...interface{})
+ Print(args ...interface{})
+ Warn(args ...interface{})
+ Warning(args ...interface{})
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Panic(args ...interface{})
+
+ Debugln(args ...interface{})
+ Infoln(args ...interface{})
+ Println(args ...interface{})
+ Warnln(args ...interface{})
+ Warningln(args ...interface{})
+ Errorln(args ...interface{})
+ Fatalln(args ...interface{})
+ Panicln(args ...interface{})
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 000000000..71f8d67a5
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,9 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 000000000..a2c0b40db
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 000000000..b343b3a37
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,21 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd dragonfly
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stderr
+ var termios Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 000000000..3e70bf7bf
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,15 @@
+// +build solaris
+
+package logrus
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+ _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
+ return err == nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 000000000..0146845d1
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stderr
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 000000000..06ef20233
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,161 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "sort"
+ "strings"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 34
+ gray = 37
+)
+
+var (
+ baseTimestamp time.Time
+ isTerminal bool
+)
+
+func init() {
+ baseTimestamp = time.Now()
+ isTerminal = IsTerminal()
+}
+
+func miniTS() int {
+ return int(time.Since(baseTimestamp) / time.Second)
+}
+
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+
+ // Force disabling colors.
+ DisableColors bool
+
+ // Disable timestamp logging. Useful when output is redirected to a logging
+ // system that already adds timestamps.
+ DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time passed since beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+ keys := make([]string, 0, len(entry.Data))
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+
+ if !f.DisableSorting {
+ sort.Strings(keys)
+ }
+
+ b := &bytes.Buffer{}
+
+ prefixFieldClashes(entry.Data)
+
+ isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+ isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+ if isColored {
+ f.printColored(b, entry, keys, timestampFormat)
+ } else {
+ if !f.DisableTimestamp {
+ f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+ }
+ f.appendKeyValue(b, "level", entry.Level.String())
+ if entry.Message != "" {
+ f.appendKeyValue(b, "msg", entry.Message)
+ }
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+ var levelColor int
+ switch entry.Level {
+ case DebugLevel:
+ levelColor = gray
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ if !f.FullTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ } else {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+ }
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
+ }
+}
+
+func needsQuoting(text string) bool {
+ for _, ch := range text {
+ if !((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '-' || ch == '.') {
+ return false
+ }
+ }
+ return true
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+
+ b.WriteString(key)
+ b.WriteByte('=')
+
+ switch value := value.(type) {
+ case string:
+ if needsQuoting(value) {
+ b.WriteString(value)
+ } else {
+ fmt.Fprintf(b, "%q", value)
+ }
+ case error:
+ errmsg := value.Error()
+ if needsQuoting(errmsg) {
+ b.WriteString(errmsg)
+ } else {
+ fmt.Fprintf(b, "%q", value)
+ }
+ default:
+ fmt.Fprint(b, value)
+ }
+
+ b.WriteByte(' ')
+}
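
The TextFormatter above uses the colored, elapsed-seconds layout only when stderr is a TTY (or ForceColors is set) and otherwise emits plain key=value pairs. A minimal usage sketch follows; the formatter settings and log fields are illustrative, not values this package requires:

package main

import (
	log "github.com/Sirupsen/logrus"
)

func main() {
	// Show full timestamps (rather than seconds since start) even when a
	// TTY is attached, using a custom layout.
	log.SetFormatter(&log.TextFormatter{
		FullTimestamp:   true,
		TimestampFormat: "2006-01-02 15:04:05",
	})

	log.WithFields(log.Fields{
		"provider": "nomad",
		"attempt":  1,
	}).Info("registering job")
}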
diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 000000000..1e30b1c75
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,31 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ go logger.writerScanner(reader)
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ logger.Print(scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ logger.Errorf("Error while reading from Writer: %s", err)
+ }
+ reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
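
Writer returns the write end of an in-process pipe; writerScanner re-emits each line through the logger (Print logs at the default Info level), and the finalizer closes the pipe once the writer is garbage-collected. A small sketch of the usual pattern, redirecting the standard library logger through logrus:

package main

import (
	stdlog "log"

	"github.com/Sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// Every line written to w is re-logged by logger at Info level.
	w := logger.Writer()
	defer w.Close()

	stdlog.SetOutput(w)
	stdlog.Println("this line is forwarded through logrus")
}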
diff --git a/vendor/github.com/StackExchange/wmi/LICENSE b/vendor/github.com/StackExchange/wmi/LICENSE
new file mode 100644
index 000000000..ae80b6720
--- /dev/null
+++ b/vendor/github.com/StackExchange/wmi/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Stack Exchange
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/StackExchange/wmi/README.md b/vendor/github.com/StackExchange/wmi/README.md
new file mode 100644
index 000000000..3d5f67e14
--- /dev/null
+++ b/vendor/github.com/StackExchange/wmi/README.md
@@ -0,0 +1,4 @@
+wmi
+===
+
+Package wmi provides a WQL interface for WMI on Windows.
diff --git a/vendor/github.com/StackExchange/wmi/wmi.go b/vendor/github.com/StackExchange/wmi/wmi.go
new file mode 100644
index 000000000..b931ca57a
--- /dev/null
+++ b/vendor/github.com/StackExchange/wmi/wmi.go
@@ -0,0 +1,416 @@
+// +build windows
+
+/*
+Package wmi provides a WQL interface for WMI on Windows.
+
+Example code to print names of running processes:
+
+ type Win32_Process struct {
+ Name string
+ }
+
+ func main() {
+ var dst []Win32_Process
+ q := wmi.CreateQuery(&dst, "")
+ err := wmi.Query(q, &dst)
+ if err != nil {
+ log.Fatal(err)
+ }
+ for i, v := range dst {
+ println(i, v.Name)
+ }
+ }
+
+*/
+package wmi
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/go-ole/go-ole"
+ "github.com/go-ole/go-ole/oleutil"
+)
+
+var l = log.New(os.Stdout, "", log.LstdFlags)
+
+var (
+ ErrInvalidEntityType = errors.New("wmi: invalid entity type")
+ lock sync.Mutex
+)
+
+// QueryNamespace invokes Query with the given namespace on the local machine.
+func QueryNamespace(query string, dst interface{}, namespace string) error {
+ return Query(query, dst, nil, namespace)
+}
+
+// Query runs the WQL query and appends the values to dst.
+//
+// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
+// the query must have the same name in dst. Supported types are all signed and
+// unsigned integers, time.Time, string, bool, or a pointer to one of those.
+// Array types are not supported.
+//
+// By default, the local machine and default namespace are used. These can be
+// changed using connectServerArgs. See
+// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details.
+//
+// Query is a wrapper around DefaultClient.Query.
+func Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
+ return DefaultClient.Query(query, dst, connectServerArgs...)
+}
+
+// A Client is a WMI query client.
+//
+// Its zero value (DefaultClient) is a usable client.
+type Client struct {
+ // NonePtrZero specifies if nil values for fields which aren't pointers
+ // should be returned as the field type's zero value.
+ //
+ // Setting this to true allows structs without pointer fields to be used
+ // without the risk of failure should a nil value be returned from WMI.
+ NonePtrZero bool
+
+ // PtrNil specifies if nil values for pointer fields should be returned
+ // as nil.
+ //
+ // Setting this to true will set pointer fields to nil where WMI
+ // returned nil, otherwise the type's zero value will be returned.
+ PtrNil bool
+
+ // AllowMissingFields specifies that struct fields not present in the
+ // query result should not result in an error.
+ //
+ // Setting this to true allows custom queries to be used with full
+ // struct definitions instead of having to define multiple structs.
+ AllowMissingFields bool
+}
+
+// DefaultClient is the default Client and is used by Query, QueryNamespace
+var DefaultClient = &Client{}
+
+// Query runs the WQL query and appends the values to dst.
+//
+// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
+// the query must have the same name in dst. Supported types are all signed and
+// unsigned integers, time.Time, string, bool, or a pointer to one of those.
+// Array types are not supported.
+//
+// By default, the local machine and default namespace are used. These can be
+// changed using connectServerArgs. See
+// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details.
+func (c *Client) Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
+ dv := reflect.ValueOf(dst)
+ if dv.Kind() != reflect.Ptr || dv.IsNil() {
+ return ErrInvalidEntityType
+ }
+ dv = dv.Elem()
+ mat, elemType := checkMultiArg(dv)
+ if mat == multiArgTypeInvalid {
+ return ErrInvalidEntityType
+ }
+
+ lock.Lock()
+ defer lock.Unlock()
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
+ if err != nil {
+ oleerr := err.(*ole.OleError)
+ // S_FALSE = 0x00000001 // CoInitializeEx was already called on this thread
+ if oleerr.Code() != ole.S_OK && oleerr.Code() != 0x00000001 {
+ return err
+ }
+ } else {
+ // Only invoke CoUninitialize if the thread was not initialized before.
+ // This will allow other go packages based on go-ole to play along
+ // with this library.
+ defer ole.CoUninitialize()
+ }
+
+ unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator")
+ if err != nil {
+ return err
+ }
+ defer unknown.Release()
+
+ wmi, err := unknown.QueryInterface(ole.IID_IDispatch)
+ if err != nil {
+ return err
+ }
+ defer wmi.Release()
+
+ // service is a SWbemServices
+ serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", connectServerArgs...)
+ if err != nil {
+ return err
+ }
+ service := serviceRaw.ToIDispatch()
+ defer serviceRaw.Clear()
+
+ // result is a SWBemObjectSet
+ resultRaw, err := oleutil.CallMethod(service, "ExecQuery", query)
+ if err != nil {
+ return err
+ }
+ result := resultRaw.ToIDispatch()
+ defer resultRaw.Clear()
+
+ count, err := oleInt64(result, "Count")
+ if err != nil {
+ return err
+ }
+
+ // Initialize a slice with Count capacity
+ dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count)))
+
+ var errFieldMismatch error
+ for i := int64(0); i < count; i++ {
+ err := func() error {
+ // item is a SWbemObject, but really a Win32_Process
+ itemRaw, err := oleutil.CallMethod(result, "ItemIndex", i)
+ if err != nil {
+ return err
+ }
+ item := itemRaw.ToIDispatch()
+ defer itemRaw.Clear()
+
+ ev := reflect.New(elemType)
+ if err = c.loadEntity(ev.Interface(), item); err != nil {
+ if _, ok := err.(*ErrFieldMismatch); ok {
+ // We continue loading entities even in the face of field mismatch errors.
+ // If we encounter any other error, that other error is returned. Otherwise,
+ // an ErrFieldMismatch is returned.
+ errFieldMismatch = err
+ } else {
+ return err
+ }
+ }
+ if mat != multiArgTypeStructPtr {
+ ev = ev.Elem()
+ }
+ dv.Set(reflect.Append(dv, ev))
+ return nil
+ }()
+ if err != nil {
+ return err
+ }
+ }
+ return errFieldMismatch
+}
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+// StructType is the type of the struct pointed to by the destination argument.
+type ErrFieldMismatch struct {
+ StructType reflect.Type
+ FieldName string
+ Reason string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+ return fmt.Sprintf("wmi: cannot load field %q into a %q: %s",
+ e.FieldName, e.StructType, e.Reason)
+}
+
+var timeType = reflect.TypeOf(time.Time{})
+
+// loadEntity loads a SWbemObject into a struct pointer.
+func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismatch error) {
+ v := reflect.ValueOf(dst).Elem()
+ for i := 0; i < v.NumField(); i++ {
+ f := v.Field(i)
+ of := f
+ isPtr := f.Kind() == reflect.Ptr
+ if isPtr {
+ ptr := reflect.New(f.Type().Elem())
+ f.Set(ptr)
+ f = f.Elem()
+ }
+ n := v.Type().Field(i).Name
+ if !f.CanSet() {
+ return &ErrFieldMismatch{
+ StructType: of.Type(),
+ FieldName: n,
+ Reason: "CanSet() is false",
+ }
+ }
+ prop, err := oleutil.GetProperty(src, n)
+ if err != nil {
+ if !c.AllowMissingFields {
+ errFieldMismatch = &ErrFieldMismatch{
+ StructType: of.Type(),
+ FieldName: n,
+ Reason: "no such struct field",
+ }
+ }
+ continue
+ }
+ defer prop.Clear()
+
+ switch val := prop.Value().(type) {
+ case int8, int16, int32, int64, int:
+ v := reflect.ValueOf(val).Int()
+ switch f.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ f.SetInt(v)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ f.SetUint(uint64(v))
+ default:
+ return &ErrFieldMismatch{
+ StructType: of.Type(),
+ FieldName: n,
+ Reason: "not an integer class",
+ }
+ }
+ case uint8, uint16, uint32, uint64:
+ v := reflect.ValueOf(val).Uint()
+ switch f.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ f.SetInt(int64(v))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ f.SetUint(v)
+ default:
+ return &ErrFieldMismatch{
+ StructType: of.Type(),
+ FieldName: n,
+ Reason: "not an integer class",
+ }
+ }
+ case string:
+ switch f.Kind() {
+ case reflect.String:
+ f.SetString(val)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ iv, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ return err
+ }
+ f.SetInt(iv)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ uv, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return err
+ }
+ f.SetUint(uv)
+ case reflect.Struct:
+ switch f.Type() {
+ case timeType:
+ if len(val) == 25 {
+ mins, err := strconv.Atoi(val[22:])
+ if err != nil {
+ return err
+ }
+ val = val[:22] + fmt.Sprintf("%02d%02d", mins/60, mins%60)
+ }
+ t, err := time.Parse("20060102150405.000000-0700", val)
+ if err != nil {
+ return err
+ }
+ f.Set(reflect.ValueOf(t))
+ }
+ }
+ case bool:
+ switch f.Kind() {
+ case reflect.Bool:
+ f.SetBool(val)
+ default:
+ return &ErrFieldMismatch{
+ StructType: of.Type(),
+ FieldName: n,
+ Reason: "not a bool",
+ }
+ }
+ default:
+ typeof := reflect.TypeOf(val)
+ if typeof == nil && (isPtr || c.NonePtrZero) {
+ if (isPtr && c.PtrNil) || (!isPtr && c.NonePtrZero) {
+ of.Set(reflect.Zero(of.Type()))
+ }
+ break
+ }
+ return &ErrFieldMismatch{
+ StructType: of.Type(),
+ FieldName: n,
+ Reason: fmt.Sprintf("unsupported type (%T)", val),
+ }
+ }
+ }
+ return errFieldMismatch
+}
+
+type multiArgType int
+
+const (
+ multiArgTypeInvalid multiArgType = iota
+ multiArgTypeStruct
+ multiArgTypeStructPtr
+)
+
+// checkMultiArg checks that v has type []S, []*S for some struct type S.
+//
+// It returns what category the slice's elements are, and the reflect.Type
+// that represents S.
+func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
+ if v.Kind() != reflect.Slice {
+ return multiArgTypeInvalid, nil
+ }
+ elemType = v.Type().Elem()
+ switch elemType.Kind() {
+ case reflect.Struct:
+ return multiArgTypeStruct, elemType
+ case reflect.Ptr:
+ elemType = elemType.Elem()
+ if elemType.Kind() == reflect.Struct {
+ return multiArgTypeStructPtr, elemType
+ }
+ }
+ return multiArgTypeInvalid, nil
+}
+
+func oleInt64(item *ole.IDispatch, prop string) (int64, error) {
+ v, err := oleutil.GetProperty(item, prop)
+ if err != nil {
+ return 0, err
+ }
+ defer v.Clear()
+
+ i := int64(v.Val)
+ return i, nil
+}
+
+// CreateQuery returns a WQL query string that queries all columns of src. where
+// is an optional string that is appended to the query, to be used with WHERE
+// clauses. In such a case, the "WHERE" string should appear at the beginning.
+func CreateQuery(src interface{}, where string) string {
+ var b bytes.Buffer
+ b.WriteString("SELECT ")
+ s := reflect.Indirect(reflect.ValueOf(src))
+ t := s.Type()
+ if s.Kind() == reflect.Slice {
+ t = t.Elem()
+ }
+ if t.Kind() != reflect.Struct {
+ return ""
+ }
+ var fields []string
+ for i := 0; i < t.NumField(); i++ {
+ fields = append(fields, t.Field(i).Name)
+ }
+ b.WriteString(strings.Join(fields, ", "))
+ b.WriteString(" FROM ")
+ b.WriteString(t.Name())
+ b.WriteString(" " + where)
+ return b.String()
+}
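
Query reflects over the destination slice's element type to map WMI properties onto struct fields, and CreateQuery builds the matching SELECT statement from those same field names. A sketch of that flow, in which the Win32_LogicalDisk class, its fields, and the DriveType filter are illustrative assumptions rather than anything the package mandates:

// +build windows

package main

import (
	"log"

	"github.com/StackExchange/wmi"
)

// Win32_LogicalDisk mirrors a subset of the WMI class of the same name;
// only the fields to be selected need to be declared.
type Win32_LogicalDisk struct {
	DeviceID string
	Size     uint64
}

func main() {
	var dst []Win32_LogicalDisk
	// Produces: SELECT DeviceID, Size FROM Win32_LogicalDisk WHERE DriveType = 3
	q := wmi.CreateQuery(&dst, "WHERE DriveType = 3")
	if err := wmi.Query(q, &dst); err != nil {
		log.Fatal(err)
	}
	for _, d := range dst {
		log.Printf("%s: %d bytes", d.DeviceID, d.Size)
	}
}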
diff --git a/vendor/github.com/armon/go-metrics/LICENSE b/vendor/github.com/armon/go-metrics/LICENSE
new file mode 100644
index 000000000..106569e54
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Armon Dadgar
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE
new file mode 100644
index 000000000..8f3fee627
--- /dev/null
+++ b/vendor/github.com/docker/docker/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2013-2016 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE
new file mode 100644
index 000000000..8a37c1c7b
--- /dev/null
+++ b/vendor/github.com/docker/docker/NOTICE
@@ -0,0 +1,19 @@
+Docker
+Copyright 2012-2016 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+This product contains software (https://github.com/kr/pty) developed
+by Keith Rarick, licensed under the MIT License.
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go
new file mode 100644
index 000000000..5516ed09d
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/mount/mount.go
@@ -0,0 +1,58 @@
+package mount
+
+// Type represents the type of a mount.
+type Type string
+
+const (
+ // TypeBind BIND
+ TypeBind Type = "bind"
+ // TypeVolume VOLUME
+ TypeVolume Type = "volume"
+)
+
+// Mount represents a mount (volume).
+type Mount struct {
+ Type Type `json:",omitempty"`
+ Source string `json:",omitempty"`
+ Target string `json:",omitempty"`
+ ReadOnly bool `json:",omitempty"`
+
+ BindOptions *BindOptions `json:",omitempty"`
+ VolumeOptions *VolumeOptions `json:",omitempty"`
+}
+
+// Propagation represents the propagation of a mount.
+type Propagation string
+
+const (
+ // PropagationRPrivate RPRIVATE
+ PropagationRPrivate Propagation = "rprivate"
+ // PropagationPrivate PRIVATE
+ PropagationPrivate Propagation = "private"
+ // PropagationRShared RSHARED
+ PropagationRShared Propagation = "rshared"
+ // PropagationShared SHARED
+ PropagationShared Propagation = "shared"
+ // PropagationRSlave RSLAVE
+ PropagationRSlave Propagation = "rslave"
+ // PropagationSlave SLAVE
+ PropagationSlave Propagation = "slave"
+)
+
+// BindOptions defines options specific to mounts of type "bind".
+type BindOptions struct {
+ Propagation Propagation `json:",omitempty"`
+}
+
+// VolumeOptions represents the options for a mount of type volume.
+type VolumeOptions struct {
+ NoCopy bool `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ DriverConfig *Driver `json:",omitempty"`
+}
+
+// Driver represents a volume driver.
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go
new file mode 100644
index 000000000..589e2cfdf
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/common.go
@@ -0,0 +1,21 @@
+package swarm
+
+import "time"
+
+// Version represents the internal object version.
+type Version struct {
+ Index uint64 `json:",omitempty"`
+}
+
+// Meta is a base object inherited by most of the other ones.
+type Meta struct {
+ Version Version `json:",omitempty"`
+ CreatedAt time.Time `json:",omitempty"`
+ UpdatedAt time.Time `json:",omitempty"`
+}
+
+// Annotations represents how to describe an object.
+type Annotations struct {
+ Name string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go
new file mode 100644
index 000000000..4a84f2e53
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/container.go
@@ -0,0 +1,22 @@
+package swarm
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/mount"
+)
+
+// ContainerSpec represents the spec of a container.
+type ContainerSpec struct {
+ Image string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Command []string `json:",omitempty"`
+ Args []string `json:",omitempty"`
+ Env []string `json:",omitempty"`
+ Dir string `json:",omitempty"`
+ User string `json:",omitempty"`
+ Groups []string `json:",omitempty"`
+ TTY bool `json:",omitempty"`
+ Mounts []mount.Mount `json:",omitempty"`
+ StopGracePeriod *time.Duration `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go
new file mode 100644
index 000000000..76b0bea1b
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/network.go
@@ -0,0 +1,102 @@
+package swarm
+
+// Endpoint represents an endpoint.
+type Endpoint struct {
+ Spec EndpointSpec `json:",omitempty"`
+ Ports []PortConfig `json:",omitempty"`
+ VirtualIPs []EndpointVirtualIP `json:",omitempty"`
+}
+
+// EndpointSpec represents the spec of an endpoint.
+type EndpointSpec struct {
+ Mode ResolutionMode `json:",omitempty"`
+ Ports []PortConfig `json:",omitempty"`
+}
+
+// ResolutionMode represents a resolution mode.
+type ResolutionMode string
+
+const (
+ // ResolutionModeVIP VIP
+ ResolutionModeVIP ResolutionMode = "vip"
+ // ResolutionModeDNSRR DNSRR
+ ResolutionModeDNSRR ResolutionMode = "dnsrr"
+)
+
+// PortConfig represents the config of a port.
+type PortConfig struct {
+ Name string `json:",omitempty"`
+ Protocol PortConfigProtocol `json:",omitempty"`
+ // TargetPort is the port inside the container
+ TargetPort uint32 `json:",omitempty"`
+ // PublishedPort is the port on the swarm hosts
+ PublishedPort uint32 `json:",omitempty"`
+}
+
+// PortConfigProtocol represents the protocol of a port.
+type PortConfigProtocol string
+
+const (
+ // TODO(stevvooe): These should be used generally, not just for PortConfig.
+
+ // PortConfigProtocolTCP TCP
+ PortConfigProtocolTCP PortConfigProtocol = "tcp"
+ // PortConfigProtocolUDP UDP
+ PortConfigProtocolUDP PortConfigProtocol = "udp"
+)
+
+// EndpointVirtualIP represents the virtual ip of a port.
+type EndpointVirtualIP struct {
+ NetworkID string `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// Network represents a network.
+type Network struct {
+ ID string
+ Meta
+ Spec NetworkSpec `json:",omitempty"`
+ DriverState Driver `json:",omitempty"`
+ IPAMOptions *IPAMOptions `json:",omitempty"`
+}
+
+// NetworkSpec represents the spec of a network.
+type NetworkSpec struct {
+ Annotations
+ DriverConfiguration *Driver `json:",omitempty"`
+ IPv6Enabled bool `json:",omitempty"`
+ Internal bool `json:",omitempty"`
+ Attachable bool `json:",omitempty"`
+ IPAMOptions *IPAMOptions `json:",omitempty"`
+}
+
+// NetworkAttachmentConfig represents the configuration of a network attachment.
+type NetworkAttachmentConfig struct {
+ Target string `json:",omitempty"`
+ Aliases []string `json:",omitempty"`
+}
+
+// NetworkAttachment represents a network attachment.
+type NetworkAttachment struct {
+ Network Network `json:",omitempty"`
+ Addresses []string `json:",omitempty"`
+}
+
+// IPAMOptions represents ipam options.
+type IPAMOptions struct {
+ Driver Driver `json:",omitempty"`
+ Configs []IPAMConfig `json:",omitempty"`
+}
+
+// IPAMConfig represents ipam configuration.
+type IPAMConfig struct {
+ Subnet string `json:",omitempty"`
+ Range string `json:",omitempty"`
+ Gateway string `json:",omitempty"`
+}
+
+// Driver represents a network driver.
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go
new file mode 100644
index 000000000..785e76e34
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/node.go
@@ -0,0 +1,113 @@
+package swarm
+
+// Node represents a node.
+type Node struct {
+ ID string
+ Meta
+ // Spec defines the desired state of the node as specified by the user.
+ // The system will honor this and will *never* modify it.
+ Spec NodeSpec `json:",omitempty"`
+ // Description encapsulates the properties of the Node as reported by the
+ // agent.
+ Description NodeDescription `json:",omitempty"`
+ // Status provides the current status of the node, as seen by the manager.
+ Status NodeStatus `json:",omitempty"`
+ // ManagerStatus provides the current status of the node's manager
+ // component, if the node is a manager.
+ ManagerStatus *ManagerStatus `json:",omitempty"`
+}
+
+// NodeSpec represents the spec of a node.
+type NodeSpec struct {
+ Annotations
+ Role NodeRole `json:",omitempty"`
+ Availability NodeAvailability `json:",omitempty"`
+}
+
+// NodeRole represents the role of a node.
+type NodeRole string
+
+const (
+ // NodeRoleWorker WORKER
+ NodeRoleWorker NodeRole = "worker"
+ // NodeRoleManager MANAGER
+ NodeRoleManager NodeRole = "manager"
+)
+
+// NodeAvailability represents the availability of a node.
+type NodeAvailability string
+
+const (
+ // NodeAvailabilityActive ACTIVE
+ NodeAvailabilityActive NodeAvailability = "active"
+ // NodeAvailabilityPause PAUSE
+ NodeAvailabilityPause NodeAvailability = "pause"
+ // NodeAvailabilityDrain DRAIN
+ NodeAvailabilityDrain NodeAvailability = "drain"
+)
+
+// NodeDescription represents the description of a node.
+type NodeDescription struct {
+ Hostname string `json:",omitempty"`
+ Platform Platform `json:",omitempty"`
+ Resources Resources `json:",omitempty"`
+ Engine EngineDescription `json:",omitempty"`
+}
+
+// Platform represents the platform (Arch/OS).
+type Platform struct {
+ Architecture string `json:",omitempty"`
+ OS string `json:",omitempty"`
+}
+
+// EngineDescription represents the description of an engine.
+type EngineDescription struct {
+ EngineVersion string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Plugins []PluginDescription `json:",omitempty"`
+}
+
+// PluginDescription represents the description of an engine plugin.
+type PluginDescription struct {
+ Type string `json:",omitempty"`
+ Name string `json:",omitempty"`
+}
+
+// NodeStatus represents the status of a node.
+type NodeStatus struct {
+ State NodeState `json:",omitempty"`
+ Message string `json:",omitempty"`
+}
+
+// Reachability represents the reachability of a node.
+type Reachability string
+
+const (
+ // ReachabilityUnknown UNKNOWN
+ ReachabilityUnknown Reachability = "unknown"
+ // ReachabilityUnreachable UNREACHABLE
+ ReachabilityUnreachable Reachability = "unreachable"
+ // ReachabilityReachable REACHABLE
+ ReachabilityReachable Reachability = "reachable"
+)
+
+// ManagerStatus represents the status of a manager.
+type ManagerStatus struct {
+ Leader bool `json:",omitempty"`
+ Reachability Reachability `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// NodeState represents the state of a node.
+type NodeState string
+
+const (
+ // NodeStateUnknown UNKNOWN
+ NodeStateUnknown NodeState = "unknown"
+ // NodeStateDown DOWN
+ NodeStateDown NodeState = "down"
+ // NodeStateReady READY
+ NodeStateReady NodeState = "ready"
+ // NodeStateDisconnected DISCONNECTED
+ NodeStateDisconnected NodeState = "disconnected"
+)
diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go
new file mode 100644
index 000000000..004638736
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/service.go
@@ -0,0 +1,105 @@
+package swarm
+
+import "time"
+
+// Service represents a service.
+type Service struct {
+ ID string
+ Meta
+ Spec ServiceSpec `json:",omitempty"`
+ PreviousSpec *ServiceSpec `json:",omitempty"`
+ Endpoint Endpoint `json:",omitempty"`
+ UpdateStatus UpdateStatus `json:",omitempty"`
+}
+
+// ServiceSpec represents the spec of a service.
+type ServiceSpec struct {
+ Annotations
+
+ // TaskTemplate defines how the service should construct new tasks when
+ // orchestrating this service.
+ TaskTemplate TaskSpec `json:",omitempty"`
+ Mode ServiceMode `json:",omitempty"`
+ UpdateConfig *UpdateConfig `json:",omitempty"`
+
+ // Networks field in ServiceSpec is deprecated. The
+ // same field in TaskSpec should be used instead.
+ // This field will be removed in a future release.
+ Networks []NetworkAttachmentConfig `json:",omitempty"`
+ EndpointSpec *EndpointSpec `json:",omitempty"`
+}
+
+// ServiceMode represents the mode of a service.
+type ServiceMode struct {
+ Replicated *ReplicatedService `json:",omitempty"`
+ Global *GlobalService `json:",omitempty"`
+}
+
+// UpdateState is the state of a service update.
+type UpdateState string
+
+const (
+ // UpdateStateUpdating is the updating state.
+ UpdateStateUpdating UpdateState = "updating"
+ // UpdateStatePaused is the paused state.
+ UpdateStatePaused UpdateState = "paused"
+ // UpdateStateCompleted is the completed state.
+ UpdateStateCompleted UpdateState = "completed"
+)
+
+// UpdateStatus reports the status of a service update.
+type UpdateStatus struct {
+ State UpdateState `json:",omitempty"`
+ StartedAt time.Time `json:",omitempty"`
+ CompletedAt time.Time `json:",omitempty"`
+ Message string `json:",omitempty"`
+}
+
+// ReplicatedService is a kind of ServiceMode.
+type ReplicatedService struct {
+ Replicas *uint64 `json:",omitempty"`
+}
+
+// GlobalService is a kind of ServiceMode.
+type GlobalService struct{}
+
+const (
+ // UpdateFailureActionPause PAUSE
+ UpdateFailureActionPause = "pause"
+ // UpdateFailureActionContinue CONTINUE
+ UpdateFailureActionContinue = "continue"
+)
+
+// UpdateConfig represents the update configuration.
+type UpdateConfig struct {
+ // Maximum number of tasks to be updated in one iteration.
+ // 0 means unlimited parallelism.
+ Parallelism uint64 `json:",omitempty"`
+
+ // Amount of time between updates.
+ Delay time.Duration `json:",omitempty"`
+
+ // FailureAction is the action to take when an update fails.
+ FailureAction string `json:",omitempty"`
+
+ // Monitor indicates how long to monitor a task for failure after it is
+ // created. If the task fails by ending up in one of the states
+ // REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
+ // this counts as a failure. If it fails after Monitor, it does not
+ // count as a failure. If Monitor is unspecified, a default value will
+ // be used.
+ Monitor time.Duration `json:",omitempty"`
+
+ // MaxFailureRatio is the fraction of tasks that may fail during
+ // an update before the failure action is invoked. Any task created by
+ // the current update which ends up in one of the states REJECTED,
+ // COMPLETED or FAILED within Monitor from its creation counts as a
+ // failure. The number of failures is divided by the number of tasks
+ // being updated, and if this fraction is greater than
+ // MaxFailureRatio, the failure action is invoked.
+ //
+ // If the failure action is CONTINUE, there is no effect.
+ // If the failure action is PAUSE, no more tasks will be updated until
+ // another update is started.
+ MaxFailureRatio float32
+}
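
ServiceSpec ties the types in this package together: the embedded Annotations name the service, TaskTemplate carries the ContainerSpec and restart behavior, and ServiceMode picks replicated or global scheduling. A minimal sketch of assembling such a spec (the service name, image, and replica count are illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	replicas := uint64(3)
	stopGrace := 10 * time.Second

	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{
			Name:   "web",
			Labels: map[string]string{"tier": "frontend"},
		},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: swarm.ContainerSpec{
				Image:           "nginx:1.11",
				StopGracePeriod: &stopGrace,
			},
			RestartPolicy: &swarm.RestartPolicy{
				Condition: swarm.RestartPolicyConditionOnFailure,
			},
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{Replicas: &replicas},
		},
	}

	fmt.Printf("%+v\n", spec)
}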
diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go
new file mode 100644
index 000000000..e96d331ae
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go
@@ -0,0 +1,178 @@
+package swarm
+
+import "time"
+
+// ClusterInfo represents info about the cluster for outputting in "info".
+// It contains the same information as "Swarm", but without the JoinTokens.
+type ClusterInfo struct {
+ ID string
+ Meta
+ Spec Spec
+}
+
+// Swarm represents a swarm.
+type Swarm struct {
+ ClusterInfo
+ JoinTokens JoinTokens
+}
+
+// JoinTokens contains the tokens workers and managers need to join the swarm.
+type JoinTokens struct {
+ // Worker is the join token workers may use to join the swarm.
+ Worker string
+ // Manager is the join token managers may use to join the swarm.
+ Manager string
+}
+
+// Spec represents the spec of a swarm.
+type Spec struct {
+ Annotations
+
+ Orchestration OrchestrationConfig `json:",omitempty"`
+ Raft RaftConfig `json:",omitempty"`
+ Dispatcher DispatcherConfig `json:",omitempty"`
+ CAConfig CAConfig `json:",omitempty"`
+ TaskDefaults TaskDefaults `json:",omitempty"`
+}
+
+// OrchestrationConfig represents orchestration configuration.
+type OrchestrationConfig struct {
+ // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
+ // node. If negative, never remove completed or failed tasks.
+ TaskHistoryRetentionLimit *int64 `json:",omitempty"`
+}
+
+// TaskDefaults parameterizes cluster-level task creation with default values.
+type TaskDefaults struct {
+ // LogDriver selects the log driver to use for tasks created in the
+ // orchestrator if unspecified by a service.
+ //
+ // Updating this value will only have an effect on new tasks. Old tasks
+ // will continue to use their previously configured log driver until
+ // recreated.
+ LogDriver *Driver `json:",omitempty"`
+}
+
+// RaftConfig represents raft configuration.
+type RaftConfig struct {
+ // SnapshotInterval is the number of log entries between snapshots.
+ SnapshotInterval uint64 `json:",omitempty"`
+
+ // KeepOldSnapshots is the number of snapshots to keep beyond the
+ // current snapshot.
+ KeepOldSnapshots uint64 `json:",omitempty"`
+
+ // LogEntriesForSlowFollowers is the number of log entries to keep
+ // around to sync up slow followers after a snapshot is created.
+ LogEntriesForSlowFollowers uint64 `json:",omitempty"`
+
+ // ElectionTick is the number of ticks that a follower will wait for a message
+ // from the leader before becoming a candidate and starting an election.
+ // ElectionTick must be greater than HeartbeatTick.
+ //
+ // A tick currently defaults to one second, so these translate directly to
+ // seconds currently, but this is NOT guaranteed.
+ ElectionTick int
+
+ // HeartbeatTick is the number of ticks between heartbeats. Every
+ // HeartbeatTick ticks, the leader will send a heartbeat to the
+ // followers.
+ //
+ // A tick currently defaults to one second, so these translate directly to
+ // seconds currently, but this is NOT guaranteed.
+ HeartbeatTick int
+}
+
+// DispatcherConfig represents dispatcher configuration.
+type DispatcherConfig struct {
+ // HeartbeatPeriod defines how often agent should send heartbeats to
+ // dispatcher.
+ HeartbeatPeriod time.Duration `json:",omitempty"`
+}
+
+// CAConfig represents CA configuration.
+type CAConfig struct {
+ // NodeCertExpiry is the duration certificates should be issued for
+ NodeCertExpiry time.Duration `json:",omitempty"`
+
+ // ExternalCAs is a list of CAs to which a manager node will make
+ // certificate signing requests for node certificates.
+ ExternalCAs []*ExternalCA `json:",omitempty"`
+}
+
+// ExternalCAProtocol represents type of external CA.
+type ExternalCAProtocol string
+
+// ExternalCAProtocolCFSSL CFSSL
+const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
+
+// ExternalCA defines external CA to be used by the cluster.
+type ExternalCA struct {
+ // Protocol is the protocol used by this external CA.
+ Protocol ExternalCAProtocol
+
+ // URL is the URL where the external CA can be reached.
+ URL string
+
+ // Options is a set of additional key/value pairs whose interpretation
+ // depends on the specified CA type.
+ Options map[string]string `json:",omitempty"`
+}
+
+// InitRequest is the request used to init a swarm.
+type InitRequest struct {
+ ListenAddr string
+ AdvertiseAddr string
+ ForceNewCluster bool
+ Spec Spec
+}
+
+// JoinRequest is the request used to join a swarm.
+type JoinRequest struct {
+ ListenAddr string
+ AdvertiseAddr string
+ RemoteAddrs []string
+ JoinToken string // accept by secret
+}
+
+// LocalNodeState represents the state of the local node.
+type LocalNodeState string
+
+const (
+ // LocalNodeStateInactive INACTIVE
+ LocalNodeStateInactive LocalNodeState = "inactive"
+ // LocalNodeStatePending PENDING
+ LocalNodeStatePending LocalNodeState = "pending"
+ // LocalNodeStateActive ACTIVE
+ LocalNodeStateActive LocalNodeState = "active"
+ // LocalNodeStateError ERROR
+ LocalNodeStateError LocalNodeState = "error"
+)
+
+// Info represents generic information about swarm.
+type Info struct {
+ NodeID string
+ NodeAddr string
+
+ LocalNodeState LocalNodeState
+ ControlAvailable bool
+ Error string
+
+ RemoteManagers []Peer
+ Nodes int
+ Managers int
+
+ Cluster ClusterInfo
+}
+
+// Peer represents a peer.
+type Peer struct {
+ NodeID string
+ Addr string
+}
+
+// UpdateFlags contains flags for SwarmUpdate.
+type UpdateFlags struct {
+ RotateWorkerToken bool
+ RotateManagerToken bool
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go
new file mode 100644
index 000000000..bb28eec25
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/task.go
@@ -0,0 +1,121 @@
+package swarm
+
+import "time"
+
+// TaskState represents the state of a task.
+type TaskState string
+
+const (
+ // TaskStateNew NEW
+ TaskStateNew TaskState = "new"
+ // TaskStateAllocated ALLOCATED
+ TaskStateAllocated TaskState = "allocated"
+ // TaskStatePending PENDING
+ TaskStatePending TaskState = "pending"
+ // TaskStateAssigned ASSIGNED
+ TaskStateAssigned TaskState = "assigned"
+ // TaskStateAccepted ACCEPTED
+ TaskStateAccepted TaskState = "accepted"
+ // TaskStatePreparing PREPARING
+ TaskStatePreparing TaskState = "preparing"
+ // TaskStateReady READY
+ TaskStateReady TaskState = "ready"
+ // TaskStateStarting STARTING
+ TaskStateStarting TaskState = "starting"
+ // TaskStateRunning RUNNING
+ TaskStateRunning TaskState = "running"
+ // TaskStateComplete COMPLETE
+ TaskStateComplete TaskState = "complete"
+ // TaskStateShutdown SHUTDOWN
+ TaskStateShutdown TaskState = "shutdown"
+ // TaskStateFailed FAILED
+ TaskStateFailed TaskState = "failed"
+ // TaskStateRejected REJECTED
+ TaskStateRejected TaskState = "rejected"
+)
+
+// Task represents a task.
+type Task struct {
+ ID string
+ Meta
+ Annotations
+
+ Spec TaskSpec `json:",omitempty"`
+ ServiceID string `json:",omitempty"`
+ Slot int `json:",omitempty"`
+ NodeID string `json:",omitempty"`
+ Status TaskStatus `json:",omitempty"`
+ DesiredState TaskState `json:",omitempty"`
+ NetworksAttachments []NetworkAttachment `json:",omitempty"`
+}
+
+// TaskSpec represents the spec of a task.
+type TaskSpec struct {
+ ContainerSpec ContainerSpec `json:",omitempty"`
+ Resources *ResourceRequirements `json:",omitempty"`
+ RestartPolicy *RestartPolicy `json:",omitempty"`
+ Placement *Placement `json:",omitempty"`
+ Networks []NetworkAttachmentConfig `json:",omitempty"`
+
+ // LogDriver specifies the LogDriver to use for tasks created from this
+ // spec. If not present, the one on cluster default on swarm.Spec will be
+ // used, finally falling back to the engine default if not specified.
+ LogDriver *Driver `json:",omitempty"`
+
+ // ForceUpdate is a counter that triggers an update even if no relevant
+ // parameters have been changed.
+ ForceUpdate uint64
+}
+
+// Resources represents resources (CPU/Memory).
+type Resources struct {
+ NanoCPUs int64 `json:",omitempty"`
+ MemoryBytes int64 `json:",omitempty"`
+}
+
+// ResourceRequirements represents resources requirements.
+type ResourceRequirements struct {
+ Limits *Resources `json:",omitempty"`
+ Reservations *Resources `json:",omitempty"`
+}
+
+// Placement represents orchestration parameters.
+type Placement struct {
+ Constraints []string `json:",omitempty"`
+}
+
+// RestartPolicy represents the restart policy.
+type RestartPolicy struct {
+ Condition RestartPolicyCondition `json:",omitempty"`
+ Delay *time.Duration `json:",omitempty"`
+ MaxAttempts *uint64 `json:",omitempty"`
+ Window *time.Duration `json:",omitempty"`
+}
+
+// RestartPolicyCondition represents when to restart.
+type RestartPolicyCondition string
+
+const (
+ // RestartPolicyConditionNone NONE
+ RestartPolicyConditionNone RestartPolicyCondition = "none"
+ // RestartPolicyConditionOnFailure ON_FAILURE
+ RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
+ // RestartPolicyConditionAny ANY
+ RestartPolicyConditionAny RestartPolicyCondition = "any"
+)
+
+// TaskStatus represents the status of a task.
+type TaskStatus struct {
+ Timestamp time.Time `json:",omitempty"`
+ State TaskState `json:",omitempty"`
+ Message string `json:",omitempty"`
+ Err string `json:",omitempty"`
+ ContainerStatus ContainerStatus `json:",omitempty"`
+}
+
+// ContainerStatus represents the status of a container.
+type ContainerStatus struct {
+ ContainerID string `json:",omitempty"`
+ PID int `json:",omitempty"`
+ ExitCode int `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go
new file mode 100644
index 000000000..ad1675923
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts.go
@@ -0,0 +1,148 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+var (
+ // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp://
+ // These are the IANA registered port numbers for use with Docker
+ // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
+ DefaultHTTPPort = 2375 // Default HTTP Port
+ // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
+ DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
+ // DefaultUnixSocket Path for the unix socket.
+ // Docker daemon by default always listens on the default unix socket
+ DefaultUnixSocket = "/var/run/docker.sock"
+ // DefaultTCPHost constant defines the default host string used by docker on Windows
+ DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
+ // DefaultTLSHost constant defines the default host string used by docker for TLS sockets
+ DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
+ // DefaultNamedPipe defines the default named pipe used by docker on Windows
+ DefaultNamedPipe = `//./pipe/docker_engine`
+)
+
+// ValidateHost validates that the specified string is a valid host and returns it.
+func ValidateHost(val string) (string, error) {
+ host := strings.TrimSpace(val)
+ // The empty string means default and is not handled by parseDockerDaemonHost
+ if host != "" {
+ _, err := parseDockerDaemonHost(host)
+ if err != nil {
+ return val, err
+ }
+ }
+ // Note: unlike most flag validators, we don't return the mutated value here
+ // we need to know what the user entered later (using ParseHost) to adjust for tls
+ return val, nil
+}
+
+// ParseHost and set defaults for a Daemon host string
+func ParseHost(defaultToTLS bool, val string) (string, error) {
+ host := strings.TrimSpace(val)
+ if host == "" {
+ if defaultToTLS {
+ host = DefaultTLSHost
+ } else {
+ host = DefaultHost
+ }
+ } else {
+ var err error
+ host, err = parseDockerDaemonHost(host)
+ if err != nil {
+ return val, err
+ }
+ }
+ return host, nil
+}
+
+// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
+// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go.
+func parseDockerDaemonHost(addr string) (string, error) {
+ addrParts := strings.Split(addr, "://")
+ if len(addrParts) == 1 && addrParts[0] != "" {
+ addrParts = []string{"tcp", addrParts[0]}
+ }
+
+ switch addrParts[0] {
+ case "tcp":
+ return parseTCPAddr(addrParts[1], DefaultTCPHost)
+ case "unix":
+ return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
+ case "npipe":
+ return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
+ case "fd":
+ return addr, nil
+ default:
+ return "", fmt.Errorf("Invalid bind address format: %s", addr)
+ }
+}
+
+// parseSimpleProtoAddr parses and validates that the specified address is a valid
+// socket address for simple protocols like unix and npipe. It returns a formatted
+// socket address, either using the address parsed from addr, or the contents of
+// defaultAddr if addr is a blank string.
+func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
+ addr = strings.TrimPrefix(addr, proto+"://")
+ if strings.Contains(addr, "://") {
+ return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
+ }
+ if addr == "" {
+ addr = defaultAddr
+ }
+ return fmt.Sprintf("%s://%s", proto, addr), nil
+}
+
+// parseTCPAddr parses and validates that the specified address is a valid TCP
+// address. It returns a formatted TCP address, either using the address parsed
+// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
+// tryAddr is expected to have already been Trim()'d
+// defaultAddr must be in the full `tcp://host:port` form
+func parseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
+ if tryAddr == "" || tryAddr == "tcp://" {
+ return defaultAddr, nil
+ }
+ addr := strings.TrimPrefix(tryAddr, "tcp://")
+ if strings.Contains(addr, "://") || addr == "" {
+ return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
+ }
+
+ defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
+ defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
+ if err != nil {
+ return "", err
+ }
+ // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
+ // not 1.4. See https://github.com/golang/go/issues/12200 and
+ // https://github.com/golang/go/issues/6530.
+ if strings.HasSuffix(addr, "]:") {
+ addr += defaultPort
+ }
+
+ u, err := url.Parse("tcp://" + addr)
+ if err != nil {
+ return "", err
+ }
+
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
+ }
+
+ if host == "" {
+ host = defaultHost
+ }
+ if port == "" {
+ port = defaultPort
+ }
+ p, err := strconv.Atoi(port)
+ if err != nil && p == 0 {
+ return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
+ }
+
+ return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
+}
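
The parsing rules above can be exercised through the exported ParseHost and ValidateHost helpers. The following is a minimal sketch, assuming a non-Windows build (DefaultHost is the unix socket, DefaultHTTPHost is "localhost"); the addresses are placeholders.

package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	// An empty value falls back to DefaultHost because defaultToTLS is false.
	host, _ := opts.ParseHost(false, "")
	fmt.Println(host) // unix:///var/run/docker.sock on non-Windows builds

	// A bare host:port is assumed to be TCP.
	host, _ = opts.ParseHost(false, "10.0.0.5:2376")
	fmt.Println(host) // tcp://10.0.0.5:2376

	// "tcp://" alone expands to DefaultTCPHost.
	host, _ = opts.ParseHost(false, "tcp://")
	fmt.Println(host) // tcp://localhost:2375 on non-Windows builds

	// ValidateHost only checks the format; it returns the input unchanged.
	if _, err := opts.ValidateHost("bogus://x"); err != nil {
		fmt.Println("invalid:", err)
	}
}
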
diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go
new file mode 100644
index 000000000..611407a9d
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts_unix.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package opts
+
+import "fmt"
+
+// DefaultHost constant defines the default host string used by docker on other hosts than Windows
+var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go
new file mode 100644
index 000000000..7c239e00f
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts_windows.go
@@ -0,0 +1,6 @@
+// +build windows
+
+package opts
+
+// DefaultHost constant defines the default host string used by docker on Windows
+var DefaultHost = "npipe://" + DefaultNamedPipe
diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go
new file mode 100644
index 000000000..c7b0dc994
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/ip.go
@@ -0,0 +1,42 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+)
+
+// IPOpt holds an IP. It is used to store values from CLI flags.
+type IPOpt struct {
+ *net.IP
+}
+
+// NewIPOpt creates a new IPOpt from a reference net.IP and a
+// string representation of an IP. If the string is not a valid
+// IP it will fall back to the specified reference.
+func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
+ o := &IPOpt{
+ IP: ref,
+ }
+ o.Set(defaultVal)
+ return o
+}
+
+// Set sets an IPv4 or IPv6 address from a given string. If the given
+// string is not parseable as an IP address it returns an error.
+func (o *IPOpt) Set(val string) error {
+ ip := net.ParseIP(val)
+ if ip == nil {
+ return fmt.Errorf("%s is not an ip address", val)
+ }
+ *o.IP = ip
+ return nil
+}
+
+// String returns the IP address stored in the IPOpt. If stored IP is a
+// nil pointer, it returns an empty string.
+func (o *IPOpt) String() string {
+ if *o.IP == nil {
+ return ""
+ }
+ return o.IP.String()
+}
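
IPOpt satisfies the standard flag.Value interface (it has Set and String methods), so it can back a command-line flag directly. A minimal sketch; the flag name and default value are hypothetical.

package main

import (
	"flag"
	"fmt"
	"net"

	"github.com/docker/docker/opts"
)

func main() {
	// NewIPOpt parses the default string into bindIP; passing -ip on the
	// command line overrides it via IPOpt.Set.
	var bindIP net.IP
	flag.Var(opts.NewIPOpt(&bindIP, "127.0.0.1"), "ip", "IP address to bind to")
	flag.Parse()

	fmt.Println("binding to", bindIP)
}
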
diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go
new file mode 100644
index 000000000..0b0998177
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts.go
@@ -0,0 +1,274 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+ "regexp"
+ "strings"
+)
+
+var (
+ alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
+ domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
+)
+
+// ListOpts holds a list of values and a validation function.
+type ListOpts struct {
+ values *[]string
+ validator ValidatorFctType
+}
+
+// NewListOpts creates a new ListOpts with the specified validator.
+func NewListOpts(validator ValidatorFctType) ListOpts {
+ var values []string
+ return *NewListOptsRef(&values, validator)
+}
+
+// NewListOptsRef creates a new ListOpts with the specified values and validator.
+func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
+ return &ListOpts{
+ values: values,
+ validator: validator,
+ }
+}
+
+func (opts *ListOpts) String() string {
+ return fmt.Sprintf("%v", []string((*opts.values)))
+}
+
+// Set validates the input value if a validator is set and adds it to the
+// internal slice.
+func (opts *ListOpts) Set(value string) error {
+ if opts.validator != nil {
+ v, err := opts.validator(value)
+ if err != nil {
+ return err
+ }
+ value = v
+ }
+ (*opts.values) = append((*opts.values), value)
+ return nil
+}
+
+// Delete removes the specified element from the slice.
+func (opts *ListOpts) Delete(key string) {
+ for i, k := range *opts.values {
+ if k == key {
+ (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
+ return
+ }
+ }
+}
+
+// GetMap returns the content of values in a map in order to avoid
+// duplicates.
+func (opts *ListOpts) GetMap() map[string]struct{} {
+ ret := make(map[string]struct{})
+ for _, k := range *opts.values {
+ ret[k] = struct{}{}
+ }
+ return ret
+}
+
+// GetAll returns the values of the slice.
+func (opts *ListOpts) GetAll() []string {
+ return (*opts.values)
+}
+
+// GetAllOrEmpty returns the values of the slice
+// or an empty slice when there are no values.
+func (opts *ListOpts) GetAllOrEmpty() []string {
+ v := *opts.values
+ if v == nil {
+ return make([]string, 0)
+ }
+ return v
+}
+
+// Get checks the existence of the specified key.
+func (opts *ListOpts) Get(key string) bool {
+ for _, k := range *opts.values {
+ if k == key {
+ return true
+ }
+ }
+ return false
+}
+
+// Len returns the number of elements in the slice.
+func (opts *ListOpts) Len() int {
+ return len((*opts.values))
+}
+
+// NamedOption is an interface that list and map options
+// with names implement.
+type NamedOption interface {
+ Name() string
+}
+
+// NamedListOpts is a ListOpts with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedListOpts struct {
+ name string
+ ListOpts
+}
+
+var _ NamedOption = &NamedListOpts{}
+
+// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
+func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
+ return &NamedListOpts{
+ name: name,
+ ListOpts: *NewListOptsRef(values, validator),
+ }
+}
+
+// Name returns the name of the NamedListOpts in the configuration.
+func (o *NamedListOpts) Name() string {
+ return o.name
+}
+
+// MapOpts holds a map of values and a validation function.
+type MapOpts struct {
+ values map[string]string
+ validator ValidatorFctType
+}
+
+// Set validates the input value if a validator is set and adds it to the
+// internal map, splitting on '='.
+func (opts *MapOpts) Set(value string) error {
+ if opts.validator != nil {
+ v, err := opts.validator(value)
+ if err != nil {
+ return err
+ }
+ value = v
+ }
+ vals := strings.SplitN(value, "=", 2)
+ if len(vals) == 1 {
+ (opts.values)[vals[0]] = ""
+ } else {
+ (opts.values)[vals[0]] = vals[1]
+ }
+ return nil
+}
+
+// GetAll returns the values of MapOpts as a map.
+func (opts *MapOpts) GetAll() map[string]string {
+ return opts.values
+}
+
+func (opts *MapOpts) String() string {
+ return fmt.Sprintf("%v", map[string]string((opts.values)))
+}
+
+// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
+func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
+ if values == nil {
+ values = make(map[string]string)
+ }
+ return &MapOpts{
+ values: values,
+ validator: validator,
+ }
+}
+
+// NamedMapOpts is a MapOpts struct with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedMapOpts struct {
+ name string
+ MapOpts
+}
+
+var _ NamedOption = &NamedMapOpts{}
+
+// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
+func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
+ return &NamedMapOpts{
+ name: name,
+ MapOpts: *NewMapOpts(values, validator),
+ }
+}
+
+// Name returns the name of the NamedMapOpts in the configuration.
+func (o *NamedMapOpts) Name() string {
+ return o.name
+}
+
+// ValidatorFctType defines a validator function that returns a validated string and/or an error.
+type ValidatorFctType func(val string) (string, error)
+
+// ValidatorFctListType defines a validator function that returns a validated list of strings and/or an error
+type ValidatorFctListType func(val string) ([]string, error)
+
+// ValidateIPAddress validates an IP address.
+func ValidateIPAddress(val string) (string, error) {
+ var ip = net.ParseIP(strings.TrimSpace(val))
+ if ip != nil {
+ return ip.String(), nil
+ }
+ return "", fmt.Errorf("%s is not an ip address", val)
+}
+
+// ValidateDNSSearch validates domain for resolvconf search configuration.
+// A zero length domain is represented by a dot (.).
+func ValidateDNSSearch(val string) (string, error) {
+ if val = strings.Trim(val, " "); val == "." {
+ return val, nil
+ }
+ return validateDomain(val)
+}
+
+func validateDomain(val string) (string, error) {
+ if alphaRegexp.FindString(val) == "" {
+ return "", fmt.Errorf("%s is not a valid domain", val)
+ }
+ ns := domainRegexp.FindSubmatch([]byte(val))
+ if len(ns) > 0 && len(ns[1]) < 255 {
+ return string(ns[1]), nil
+ }
+ return "", fmt.Errorf("%s is not a valid domain", val)
+}
+
+// ValidateLabel validates that the specified string is a valid label, and returns it.
+// Labels are in the form of key=value.
+func ValidateLabel(val string) (string, error) {
+ if strings.Count(val, "=") < 1 {
+ return "", fmt.Errorf("bad attribute format: %s", val)
+ }
+ return val, nil
+}
+
+// ValidateSysctl validates a sysctl and returns it.
+func ValidateSysctl(val string) (string, error) {
+ validSysctlMap := map[string]bool{
+ "kernel.msgmax": true,
+ "kernel.msgmnb": true,
+ "kernel.msgmni": true,
+ "kernel.sem": true,
+ "kernel.shmall": true,
+ "kernel.shmmax": true,
+ "kernel.shmmni": true,
+ "kernel.shm_rmid_forced": true,
+ }
+ validSysctlPrefixes := []string{
+ "net.",
+ "fs.mqueue.",
+ }
+ arr := strings.Split(val, "=")
+ if len(arr) < 2 {
+ return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
+ }
+ if validSysctlMap[arr[0]] {
+ return val, nil
+ }
+
+ for _, vp := range validSysctlPrefixes {
+ if strings.HasPrefix(arr[0], vp) {
+ return val, nil
+ }
+ }
+ return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
+}
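
ListOpts follows the same flag.Value pattern and pairs naturally with the validators defined above. A minimal sketch with hypothetical flag names:

package main

import (
	"flag"
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	// Repeatable -dns flag; every value must pass ValidateIPAddress.
	dnsServers := opts.NewListOpts(opts.ValidateIPAddress)
	flag.Var(&dnsServers, "dns", "DNS server to use (repeatable)")

	// Repeatable -label flag; every value must look like key=value.
	labels := opts.NewListOpts(opts.ValidateLabel)
	flag.Var(&labels, "label", "label in key=value form (repeatable)")

	flag.Parse()
	fmt.Println("dns:", dnsServers.GetAll())
	fmt.Println("labels:", labels.GetAll())
}
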
diff --git a/vendor/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/docker/docker/opts/opts_unix.go
new file mode 100644
index 000000000..f1ce844a8
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts_unix.go
@@ -0,0 +1,6 @@
+// +build !windows
+
+package opts
+
+// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
+const DefaultHTTPHost = "localhost"
diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go
new file mode 100644
index 000000000..ebe40c969
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts_windows.go
@@ -0,0 +1,56 @@
+package opts
+
+// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5.
+// @jhowardmsft, @swernli.
+//
+// On Windows, this mitigates a problem with the default options of running
+// a docker client against a local docker daemon on TP5.
+//
+// What was found was that if the default host is "localhost", even if the client
+// (and daemon as this is local) is not physically on a network, and the DNS
+// cache is flushed (ipconfig /flushdns), then the client will pause for
+// exactly one second when connecting to the daemon for calls. For example
+// using docker run windowsservercore cmd, the CLI will send a create followed
+// by an attach. You see the delay between the attach finishing and the attach
+// being seen by the daemon.
+//
+// Here's some daemon debug logs with additional debug spew put in. The
+// AfterWriteJSON log is the very last thing the daemon does as part of the
+// create call. The POST /attach is the second CLI call. Notice the second
+// time gap.
+//
+// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
+// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
+// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
+// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
+// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
+// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
+// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
+// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create"
+// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2"
+// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate"
+// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON"
+// ... 1 second gap here....
+// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach"
+// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
+//
+// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
+// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory,
+// the Windows networking stack is supposed to resolve "localhost" internally,
+// without hitting DNS, or even reading the hosts file (which is why localhost
+// is commented out in the hosts file on Windows).
+//
+// We have validated that working around this using the actual IPv4 localhost
+// address does not cause the delay.
+//
+// This does not occur with the docker client built with 1.4.3 on the same
+// Windows build, regardless of whether the daemon is built using 1.5.1
+// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
+// on a cross-compiled Windows binary (from Linux).
+//
+// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
+// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
+// explicitly.
+
+// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
+const DefaultHTTPHost = "127.0.0.1"
diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md
new file mode 100644
index 000000000..7307d9694
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/README.md
@@ -0,0 +1 @@
+This code provides helper functions for dealing with archive files.
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go
new file mode 100644
index 000000000..1603a2302
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive.go
@@ -0,0 +1,1096 @@
+package archive
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "compress/bzip2"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "syscall"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/fileutils"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/promise"
+ "github.com/docker/docker/pkg/system"
+)
+
+type (
+ // Archive is an io.ReadCloser; it combines the Read and Close interfaces.
+ Archive io.ReadCloser
+ // Reader is a type of io.Reader.
+ Reader io.Reader
+ // Compression represents whether, and how, an archive is compressed.
+ Compression int
+ // TarChownOptions wraps the chown options UID and GID.
+ TarChownOptions struct {
+ UID, GID int
+ }
+ // TarOptions wraps the tar options.
+ TarOptions struct {
+ IncludeFiles []string
+ ExcludePatterns []string
+ Compression Compression
+ NoLchown bool
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+ ChownOpts *TarChownOptions
+ IncludeSourceDir bool
+ // When unpacking, specifies whether overwriting a directory with a
+ // non-directory is allowed and vice versa.
+ NoOverwriteDirNonDir bool
+ // For each include when creating an archive, the included name will be
+ // replaced with the matching name from this map.
+ RebaseNames map[string]string
+ }
+
+ // Archiver allows the reuse of most utility functions of this package
+ // with a pluggable Untar function. Also, to facilitate the passing of
+ // specific id mappings for untar, an archiver can be created with maps
+ // which will then be passed to Untar operations
+ Archiver struct {
+ Untar func(io.Reader, string, *TarOptions) error
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+ }
+
+ // breakoutError is used to differentiate errors related to breaking out of
+ // the destination directory. When testing archive breakout in the unit tests,
+ // this error is expected in order for the test to pass.
+ breakoutError error
+)
+
+var (
+ // ErrNotImplemented is the error message of function not implemented.
+ ErrNotImplemented = errors.New("Function not implemented")
+ defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
+)
+
+const (
+ // HeaderSize is the size in bytes of a tar header
+ HeaderSize = 512
+)
+
+const (
+ // Uncompressed represents an uncompressed archive.
+ Uncompressed Compression = iota
+ // Bzip2 is bzip2 compression algorithm.
+ Bzip2
+ // Gzip is gzip compression algorithm.
+ Gzip
+ // Xz is xz compression algorithm.
+ Xz
+)
+
+// IsArchive checks for the magic bytes of a tar or any supported compression
+// algorithm.
+func IsArchive(header []byte) bool {
+ compression := DetectCompression(header)
+ if compression != Uncompressed {
+ return true
+ }
+ r := tar.NewReader(bytes.NewBuffer(header))
+ _, err := r.Next()
+ return err == nil
+}
+
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+func IsArchivePath(path string) bool {
+ file, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+ defer file.Close()
+ rdr, err := DecompressStream(file)
+ if err != nil {
+ return false
+ }
+ r := tar.NewReader(rdr)
+ _, err = r.Next()
+ return err == nil
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+ for compression, m := range map[Compression][]byte{
+ Bzip2: {0x42, 0x5A, 0x68},
+ Gzip: {0x1F, 0x8B, 0x08},
+ Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+ } {
+ if len(source) < len(m) {
+ logrus.Debugf("Len too short")
+ continue
+ }
+ if bytes.Compare(m, source[:len(m)]) == 0 {
+ return compression
+ }
+ }
+ return Uncompressed
+}
+
+func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+ args := []string{"xz", "-d", "-c", "-q"}
+
+ return cmdStream(exec.Command(args[0], args[1:]...), archive)
+}
+
+// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+ p := pools.BufioReader32KPool
+ buf := p.Get(archive)
+ bs, err := buf.Peek(10)
+ if err != nil && err != io.EOF {
+ // Note: we'll ignore any io.EOF error because there are some odd
+ // cases where the layer.tar file will be empty (zero bytes) and
+ // that results in an io.EOF from the Peek() call. So, in those
+ // cases we'll just treat it as a non-compressed stream and
+ // that means just create an empty layer.
+ // See Issue 18170
+ return nil, err
+ }
+
+ compression := DetectCompression(bs)
+ switch compression {
+ case Uncompressed:
+ readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+ return readBufWrapper, nil
+ case Gzip:
+ gzReader, err := gzip.NewReader(buf)
+ if err != nil {
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+ return readBufWrapper, nil
+ case Bzip2:
+ bz2Reader := bzip2.NewReader(buf)
+ readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+ return readBufWrapper, nil
+ case Xz:
+ xzReader, chdone, err := xzDecompress(buf)
+ if err != nil {
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+ return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
+ <-chdone
+ return readBufWrapper.Close()
+ }), nil
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// CompressStream compresses dest with the specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+ p := pools.BufioWriter32KPool
+ buf := p.Get(dest)
+ switch compression {
+ case Uncompressed:
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+ return writeBufWrapper, nil
+ case Gzip:
+ gzWriter := gzip.NewWriter(dest)
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+ return writeBufWrapper, nil
+ case Bzip2, Xz:
+ // archive/bzip2 does not support writing, and there is no xz support at all
+ // However, this is not a problem as docker only currently generates gzipped tars
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// Extension returns the extension of a file that uses the specified compression algorithm.
+func (compression *Compression) Extension() string {
+ switch *compression {
+ case Uncompressed:
+ return "tar"
+ case Bzip2:
+ return "tar.bz2"
+ case Gzip:
+ return "tar.gz"
+ case Xz:
+ return "tar.xz"
+ }
+ return ""
+}
+
+type tarAppender struct {
+ TarWriter *tar.Writer
+ Buffer *bufio.Writer
+
+ // for hardlink mapping
+ SeenFiles map[uint64]string
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+}
+
+// canonicalTarName provides a platform-independent and consistent posix-style
+// path for files and directories to be archived regardless of the platform.
+func canonicalTarName(name string, isDir bool) (string, error) {
+ name, err := CanonicalTarNameForPath(name)
+ if err != nil {
+ return "", err
+ }
+
+ // suffix with '/' for directories
+ if isDir && !strings.HasSuffix(name, "/") {
+ name += "/"
+ }
+ return name, nil
+}
+
+func (ta *tarAppender) addTarFile(path, name string) error {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+
+ link := ""
+ if fi.Mode()&os.ModeSymlink != 0 {
+ if link, err = os.Readlink(path); err != nil {
+ return err
+ }
+ }
+
+ hdr, err := tar.FileInfoHeader(fi, link)
+ if err != nil {
+ return err
+ }
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+ name, err = canonicalTarName(name, fi.IsDir())
+ if err != nil {
+ return fmt.Errorf("tar: cannot canonicalize path: %v", err)
+ }
+ hdr.Name = name
+
+ inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
+ if err != nil {
+ return err
+ }
+
+ // if it's not a directory and has more than 1 link,
+ // it's hardlinked, so set the type flag accordingly
+ if !fi.IsDir() && hasHardlinks(fi) {
+ // a link should have a name that it links to
+ // and that linked name should be first in the tar archive
+ if oldpath, ok := ta.SeenFiles[inode]; ok {
+ hdr.Typeflag = tar.TypeLink
+ hdr.Linkname = oldpath
+ hdr.Size = 0 // This Must be here for the writer math to add up!
+ } else {
+ ta.SeenFiles[inode] = name
+ }
+ }
+
+ capability, _ := system.Lgetxattr(path, "security.capability")
+ if capability != nil {
+ hdr.Xattrs = make(map[string]string)
+ hdr.Xattrs["security.capability"] = string(capability)
+ }
+
+ // handle re-mapping container ID mappings back to host ID mappings before
+ // writing tar headers/files. We skip whiteout files because they were written
+ // by the kernel and already have proper ownership relative to the host
+ if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) {
+ uid, gid, err := getFileUIDGID(fi.Sys())
+ if err != nil {
+ return err
+ }
+ xUID, err := idtools.ToContainer(uid, ta.UIDMaps)
+ if err != nil {
+ return err
+ }
+ xGID, err := idtools.ToContainer(gid, ta.GIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Uid = xUID
+ hdr.Gid = xGID
+ }
+
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ if hdr.Typeflag == tar.TypeReg {
+ file, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+
+ ta.Buffer.Reset(ta.TarWriter)
+ defer ta.Buffer.Reset(nil)
+ _, err = io.Copy(ta.Buffer, file)
+ file.Close()
+ if err != nil {
+ return err
+ }
+ err = ta.Buffer.Flush()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error {
+ // hdr.Mode is in linux format, which we can use for syscalls,
+ // but for os.Foo() calls we need the mode converted to os.FileMode,
+ // so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+ hdrInfo := hdr.FileInfo()
+
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+ // Create directory unless it exists as a directory already.
+ // In that case we just want to merge the two
+ if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+ if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+
+ case tar.TypeReg, tar.TypeRegA:
+ // Source is regular file
+ file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(file, reader); err != nil {
+ file.Close()
+ return err
+ }
+ file.Close()
+
+ case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
+ // Handle this in an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeLink:
+ targetPath := filepath.Join(extractDir, hdr.Linkname)
+ // check for hardlink breakout
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+ }
+ if err := os.Link(targetPath, path); err != nil {
+ return err
+ }
+
+ case tar.TypeSymlink:
+ // path -> hdr.Linkname = targetPath
+ // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+ targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+ // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+ // that symlink would first have to be created, which would be caught earlier, at this very check:
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+ }
+ if err := os.Symlink(hdr.Linkname, path); err != nil {
+ return err
+ }
+
+ case tar.TypeXGlobalHeader:
+ logrus.Debugf("PAX Global Extended Headers found and ignored")
+ return nil
+
+ default:
+ return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
+ }
+
+ // Lchown is not supported on Windows.
+ if Lchown && runtime.GOOS != "windows" {
+ if chownOpts == nil {
+ chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid}
+ }
+ if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+ return err
+ }
+ }
+
+ var errors []string
+ for key, value := range hdr.Xattrs {
+ if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+ // We ignore errors here because not all graphdrivers support xattrs.
+ errors = append(errors, err.Error())
+ }
+ 	}
+
+ if len(errors) > 0 {
+ logrus.WithFields(logrus.Fields{
+ "errors": errors,
+ }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
+ }
+
+ // There is no LChmod, so ignore mode for symlink. Also, this
+ // must happen after chown, as that can modify the file mode
+ if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+ return err
+ }
+
+ aTime := hdr.AccessTime
+ if aTime.Before(hdr.ModTime) {
+ // Last access time should never be before last modified time.
+ aTime = hdr.ModTime
+ }
+
+ // system.Chtimes doesn't support a NOFOLLOW flag atm
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
+ return err
+ }
+ } else {
+ ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
+ if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ return err
+ }
+ }
+ return nil
+}
+
+// Tar creates an archive from the directory at `path`, and returns it as a
+// stream of bytes.
+func Tar(path string, compression Compression) (io.ReadCloser, error) {
+ return TarWithOptions(path, &TarOptions{Compression: compression})
+}
+
+// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
+// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+
+ // Fix the source path to work with long path names. This is a no-op
+ // on platforms other than Windows.
+ srcPath = fixVolumePathPrefix(srcPath)
+
+ patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)
+
+ if err != nil {
+ return nil, err
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+
+ compressWriter, err := CompressStream(pipeWriter, options.Compression)
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ ta := &tarAppender{
+ TarWriter: tar.NewWriter(compressWriter),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ SeenFiles: make(map[uint64]string),
+ UIDMaps: options.UIDMaps,
+ GIDMaps: options.GIDMaps,
+ }
+
+ defer func() {
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Errorf("Can't close tar writer: %s", err)
+ }
+ if err := compressWriter.Close(); err != nil {
+ logrus.Errorf("Can't close compress writer: %s", err)
+ }
+ if err := pipeWriter.Close(); err != nil {
+ logrus.Errorf("Can't close pipe writer: %s", err)
+ }
+ }()
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+
+ stat, err := os.Lstat(srcPath)
+ if err != nil {
+ return
+ }
+
+ if !stat.IsDir() {
+ // We can't later join a non-dir with any includes because the
+ // 'walk' will error if "file/." is stat-ed and "file" is not a
+ // directory. So, we must split the source path and use the
+ // basename as the include.
+ if len(options.IncludeFiles) > 0 {
+ logrus.Warn("Tar: Can't archive a file with includes")
+ }
+
+ dir, base := SplitPathDirEntry(srcPath)
+ srcPath = dir
+ options.IncludeFiles = []string{base}
+ }
+
+ if len(options.IncludeFiles) == 0 {
+ options.IncludeFiles = []string{"."}
+ }
+
+ seen := make(map[string]bool)
+
+ for _, include := range options.IncludeFiles {
+ rebaseName := options.RebaseNames[include]
+
+ walkRoot := getWalkRoot(srcPath, include)
+ filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
+ if err != nil {
+ logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+ return nil
+ }
+
+ relFilePath, err := filepath.Rel(srcPath, filePath)
+ if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+ // Error getting relative path OR we are looking
+ // at the source directory path. Skip in both situations.
+ return nil
+ }
+
+ if options.IncludeSourceDir && include == "." && relFilePath != "." {
+ relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+ }
+
+ skip := false
+
+ // If "include" is an exact match for the current file
+ // then even if there's an "excludePatterns" pattern that
+ // matches it, don't skip it. IOW, assume an explicit 'include'
+ // is asking for that file no matter what - which is true
+ // for some files, like .dockerignore and Dockerfile (sometimes)
+ if include != relFilePath {
+ skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
+ if err != nil {
+ logrus.Errorf("Error matching %s: %v", relFilePath, err)
+ return err
+ }
+ }
+
+ if skip {
+ // If we want to skip this file and it's a directory
+ // then we should first check to see if there's an
+ // excludes pattern (eg !dir/file) that starts with this
+ // dir. If so then we can't skip this dir.
+
+ // If it's not a dir then we can just return/skip.
+ if !f.IsDir() {
+ return nil
+ }
+
+ // No exceptions (!...) in patterns so just skip dir
+ if !exceptions {
+ return filepath.SkipDir
+ }
+
+ dirSlash := relFilePath + string(filepath.Separator)
+
+ for _, pat := range patterns {
+ if pat[0] != '!' {
+ continue
+ }
+ pat = pat[1:] + string(filepath.Separator)
+ if strings.HasPrefix(pat, dirSlash) {
+ // found a match - so can't skip this dir
+ return nil
+ }
+ }
+
+ // No matching exclusion dir so just skip dir
+ return filepath.SkipDir
+ }
+
+ if seen[relFilePath] {
+ return nil
+ }
+ seen[relFilePath] = true
+
+ // Rename the base resource.
+ if rebaseName != "" {
+ var replacement string
+ if rebaseName != string(filepath.Separator) {
+ // Special case the root directory to replace with an
+ // empty string instead so that we don't end up with
+ // double slashes in the paths.
+ replacement = rebaseName
+ }
+
+ relFilePath = strings.Replace(relFilePath, include, replacement, 1)
+ }
+
+ if err := ta.addTarFile(filePath, relFilePath); err != nil {
+ logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
+ // if pipe is broken, stop writing tar stream to it
+ if err == io.ErrClosedPipe {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ }()
+
+ return pipeReader, nil
+}
+
+// Unpack unpacks the decompressedArchive to dest with options.
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+ tr := tar.NewReader(decompressedArchive)
+ trBuf := pools.BufioReader32KPool.Get(nil)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+ if err != nil {
+ return err
+ }
+
+ // Iterate through the files in the archive.
+loop:
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ // Normalize name, for safety and for a simple is-root check
+ // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
+ // This keeps "..\" as-is, but normalizes "\..\" to "\".
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ for _, exclude := range options.ExcludePatterns {
+ if strings.HasPrefix(hdr.Name, exclude) {
+ continue loop
+ }
+ }
+
+ // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
+ // the filepath format for the OS on which the daemon is running. Hence
+ // the check for a slash-suffix MUST be done in an OS-agnostic way.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+
+ // If path exists we almost always just want to remove and replace it
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing directory with a non-directory from the archive.
+ return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+ }
+
+ if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing non-directory with a directory from the archive.
+ return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+ }
+
+ if fi.IsDir() && hdr.Name == "." {
+ continue
+ }
+
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ }
+ }
+ trBuf.Reset(tr)
+
+ // if the options contain a uid & gid maps, convert header uid/gid
+ // entries using the maps such that lchown sets the proper mapped
+ // uid/gid after writing the file. We only perform this mapping if
+ // the file isn't already owned by the remapped root UID or GID, as
+ // that specific uid/gid has no mapping from container -> host, and
+ // those files already have the proper ownership for inside the
+ // container.
+ if hdr.Uid != remappedRootUID {
+ xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Uid = xUID
+ }
+ if hdr.Gid != remappedRootGID {
+ xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Gid = xGID
+ }
+
+ if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
+ return err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them to modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
+ if tarArchive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ dest = filepath.Clean(dest)
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ r := tarArchive
+ if decompress {
+ decompressedArchive, err := DecompressStream(tarArchive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ r = decompressedArchive
+ }
+
+ return Unpack(r, dest, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func (archiver *Archiver) TarUntar(src, dst string) error {
+ logrus.Debugf("TarUntar(%s %s)", src, dst)
+ archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+
+ var options *TarOptions
+ if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
+ options = &TarOptions{
+ UIDMaps: archiver.UIDMaps,
+ GIDMaps: archiver.GIDMaps,
+ }
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func TarUntar(src, dst string) error {
+ return defaultArchiver.TarUntar(src, dst)
+}
+
+// UntarPath untars a file from path to a destination; src is the source tar file path.
+func (archiver *Archiver) UntarPath(src, dst string) error {
+ archive, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ var options *TarOptions
+ if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
+ options = &TarOptions{
+ UIDMaps: archiver.UIDMaps,
+ GIDMaps: archiver.GIDMaps,
+ }
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// UntarPath is a convenience function which looks for an archive
+// at filesystem path `src`, and unpacks it at `dst`.
+func UntarPath(src, dst string) error {
+ return defaultArchiver.UntarPath(src, dst)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !srcSt.IsDir() {
+ return archiver.CopyFileWithTar(src, dst)
+ }
+
+ // if this archiver is set up with ID mapping we need to create
+ // the new destination directory with the remapped root UID/GID pair
+ // as owner
+ rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
+ if err != nil {
+ return err
+ }
+ // Create dst, copy src's content into it
+ logrus.Debugf("Creating dest directory: %s", dst)
+ if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil {
+ return err
+ }
+ logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+ return archiver.TarUntar(src, dst)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func CopyWithTar(src, dst string) error {
+ return defaultArchiver.CopyWithTar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+ logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+
+ if srcSt.IsDir() {
+ return fmt.Errorf("Can't copy a directory")
+ }
+
+ // Clean up the trailing slash. This must be done in an operating
+ // system specific manner.
+ if dst[len(dst)-1] == os.PathSeparator {
+ dst = filepath.Join(dst, filepath.Base(src))
+ }
+ // Create the holding directory if necessary
+ if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
+ return err
+ }
+
+ r, w := io.Pipe()
+ errC := promise.Go(func() error {
+ defer w.Close()
+
+ srcF, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ hdr, err := tar.FileInfoHeader(srcSt, "")
+ if err != nil {
+ return err
+ }
+ hdr.Name = filepath.Base(dst)
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+ remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
+ if err != nil {
+ return err
+ }
+
+ // only perform mapping if the file being copied isn't already owned by the
+ // uid or gid of the remapped root in the container
+ if remappedRootUID != hdr.Uid {
+ xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Uid = xUID
+ }
+ if remappedRootGID != hdr.Gid {
+ xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Gid = xGID
+ }
+
+ tw := tar.NewWriter(w)
+ defer tw.Close()
+ if err := tw.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if _, err := io.Copy(tw, srcF); err != nil {
+ return err
+ }
+ return nil
+ })
+ defer func() {
+ if er := <-errC; err != nil {
+ err = er
+ }
+ }()
+
+ err = archiver.Untar(r, filepath.Dir(dst), nil)
+ if err != nil {
+ r.CloseWithError(err)
+ }
+ return err
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+//
+// Destination handling is done in an operating system specific manner depending
+// on where the daemon is running. If `dst` ends with a trailing slash
+// the final destination path will be `dst/base(src)` (Linux) or
+// `dst\base(src)` (Windows).
+func CopyFileWithTar(src, dst string) (err error) {
+ return defaultArchiver.CopyFileWithTar(src, dst)
+}
+
+// cmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+ chdone := make(chan struct{})
+ cmd.Stdin = input
+ pipeR, pipeW := io.Pipe()
+ cmd.Stdout = pipeW
+ var errBuf bytes.Buffer
+ cmd.Stderr = &errBuf
+
+ // Run the command and return the pipe
+ if err := cmd.Start(); err != nil {
+ return nil, nil, err
+ }
+
+ // Copy stdout to the returned pipe
+ go func() {
+ if err := cmd.Wait(); err != nil {
+ pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
+ } else {
+ pipeW.Close()
+ }
+ close(chdone)
+ }()
+
+ return pipeR, chdone, nil
+}
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
+ f, err := ioutil.TempFile(dir, "")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.Copy(f, src); err != nil {
+ return nil, err
+ }
+ if _, err := f.Seek(0, 0); err != nil {
+ return nil, err
+ }
+ st, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ size := st.Size()
+ return &TempArchive{File: f, Size: size}, nil
+}
+
+// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+type TempArchive struct {
+ *os.File
+ Size int64 // Pre-computed from Stat().Size() as a convenience
+ read int64
+ closed bool
+}
+
+// Close closes the underlying file if it's still open, or does a no-op
+// to allow callers to try to close the TempArchive multiple times safely.
+func (archive *TempArchive) Close() error {
+ if archive.closed {
+ return nil
+ }
+
+ archive.closed = true
+
+ return archive.File.Close()
+}
+
+func (archive *TempArchive) Read(data []byte) (int, error) {
+ n, err := archive.File.Read(data)
+ archive.read += int64(n)
+ if err != nil || archive.read == archive.Size {
+ archive.Close()
+ os.Remove(archive.File.Name())
+ }
+ return n, err
+}
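
The exported Tar/Untar entry points above compose into a simple copy pipeline. A minimal sketch of a gzip round trip, with placeholder directory paths:

package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// TarWithOptions streams the source directory through an io.Pipe, so the
	// gzip-compressed archive is produced lazily as it is consumed.
	rdr, err := archive.TarWithOptions("/tmp/src", &archive.TarOptions{
		Compression:     archive.Gzip,
		ExcludePatterns: []string{"*.tmp"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rdr.Close()

	// Untar detects the compression automatically and unpacks into the
	// destination; nil options means the package defaults are used.
	if err := archive.Untar(rdr, "/tmp/dst", nil); err != nil {
		log.Fatal(err)
	}
}
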
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
new file mode 100644
index 000000000..fbc3bb8c4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
@@ -0,0 +1,112 @@
+// +build !windows
+
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+// fixVolumePathPrefix does platform specific processing to ensure that if
+// the path being passed in is not in a volume path format, convert it to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return srcPath
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific. On Linux, we
+// can't use filepath.Join(srcPath,include) because this will clean away
+// a trailing "." or "/" which may be important.
+func getWalkRoot(srcPath string, include string) string {
+ return srcPath + string(filepath.Separator) + include
+}
+
+// CanonicalTarNameForPath converts the platform-specific relative path p
+// to a canonical posix-style path for tar archival.
+func CanonicalTarNameForPath(p string) (string, error) {
+ return p, nil // already unix-style
+}
+
+// chmodTarEntry is used to adjust the file permissions used in the tar header
+// based on the platform the archival is done on.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ return perm // noop for unix as golang APIs provide perm bits correctly
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if !ok {
+ err = errors.New("cannot convert stat value to syscall.Stat_t")
+ return
+ }
+
+ inode = uint64(s.Ino)
+
+ // Currently Go does not fill in the major/minor device numbers
+ if s.Mode&syscall.S_IFBLK != 0 ||
+ s.Mode&syscall.S_IFCHR != 0 {
+ hdr.Devmajor = int64(major(uint64(s.Rdev)))
+ hdr.Devminor = int64(minor(uint64(s.Rdev)))
+ }
+
+ return
+}
+
+func getFileUIDGID(stat interface{}) (int, int, error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if !ok {
+ return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
+ }
+ return int(s.Uid), int(s.Gid), nil
+}
+
+func major(device uint64) uint64 {
+ return (device >> 8) & 0xfff
+}
+
+func minor(device uint64) uint64 {
+ return (device & 0xff) | ((device >> 12) & 0xfff00)
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ mode := uint32(hdr.Mode & 07777)
+ switch hdr.Typeflag {
+ case tar.TypeBlock:
+ mode |= syscall.S_IFBLK
+ case tar.TypeChar:
+ mode |= syscall.S_IFCHR
+ case tar.TypeFifo:
+ mode |= syscall.S_IFIFO
+ }
+
+ if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
+ return err
+ }
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
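
The major and minor helpers above decode the extended Linux dev_t layout (the major number in bits 8-19, the minor number split across bits 0-7 and 20-31). A small standalone sketch of the same arithmetic, assuming that encoding:

package main

import "fmt"

// major and minor mirror the unexported helpers in archive_unix.go.
func major(device uint64) uint64 { return (device >> 8) & 0xfff }
func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) }

func main() {
	// /dev/sda1 is conventionally major 8, minor 1 on Linux.
	var dev uint64 = (8 << 8) | 1
	fmt.Println(major(dev), minor(dev)) // 8 1
}
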
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
new file mode 100644
index 000000000..5c3a1be34
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
@@ -0,0 +1,70 @@
+// +build windows
+
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/longpath"
+)
+
+// fixVolumePathPrefix does platform specific processing to ensure that if
+// the path being passed in is not in a volume path format, convert it to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return longpath.AddPrefix(srcPath)
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific.
+func getWalkRoot(srcPath string, include string) string {
+ return filepath.Join(srcPath, include)
+}
+
+// CanonicalTarNameForPath converts the platform-specific relative path p
+// to a canonical posix-style path for tar archival.
+func CanonicalTarNameForPath(p string) (string, error) {
+ // windows: convert windows style relative path with backslashes
+ // into forward slashes. Since windows does not allow '/' or '\'
+ // in file names, it is mostly safe to replace however we must
+ // check just in case
+ if strings.Contains(p, "/") {
+ return "", fmt.Errorf("Windows path contains forward slash: %s", p)
+ }
+ return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
+}
+
+// chmodTarEntry is used to adjust the file permissions used in tar header based
+// on the platform the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ perm &= 0755
+ // Add the x bit: make everything +x from windows
+ perm |= 0111
+
+ return perm
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
+ // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
+ return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ return nil
+}
+
+func getFileUIDGID(stat interface{}) (int, int, error) {
+ // no notion of file ownership mapping yet on Windows
+ return 0, 0, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go
new file mode 100644
index 000000000..81651c61d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes.go
@@ -0,0 +1,416 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+)
+
+// ChangeType represents the change type.
+type ChangeType int
+
+const (
+ // ChangeModify represents the modify operation.
+ ChangeModify = iota
+ // ChangeAdd represents the add operation.
+ ChangeAdd
+ // ChangeDelete represents the delete operation.
+ ChangeDelete
+)
+
+func (c ChangeType) String() string {
+ switch c {
+ case ChangeModify:
+ return "C"
+ case ChangeAdd:
+ return "A"
+ case ChangeDelete:
+ return "D"
+ }
+ return ""
+}
+
+// Change represents a change; it wraps the change type and path.
+// It describes changes of the files in the path with respect to the
+// parent layers. The change could be modify, add, or delete.
+// This is used for layer diffs.
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
+func (change *Change) String() string {
+ return fmt.Sprintf("%s %s", change.Kind, change.Path)
+}
+
+// for sort.Sort
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int { return len(c) }
+func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
+
+// GNU tar and the Go tar writer don't have sub-second mtime
+// precision, which is problematic when we apply changes via tar
+// files. We handle this by comparing for exact times, *or* the same
+// second count and either a or b having exactly 0 nanoseconds.
+func sameFsTime(a, b time.Time) bool {
+ return a == b ||
+ (a.Unix() == b.Unix() &&
+ (a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
+
+func sameFsTimeSpec(a, b syscall.Timespec) bool {
+ return a.Sec == b.Sec &&
+ (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
+}
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func Changes(layers []string, rw string) ([]Change, error) {
+ var (
+ changes []Change
+ changedDirs = make(map[string]struct{})
+ )
+
+ err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ path, err = filepath.Rel(rw, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ path = filepath.Join(string(os.PathSeparator), path)
+
+ // Skip root
+ if path == string(os.PathSeparator) {
+ return nil
+ }
+
+ // Skip AUFS metadata
+ if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched {
+ return err
+ }
+
+ change := Change{
+ Path: path,
+ }
+
+ // Find out what kind of modification happened
+ file := filepath.Base(path)
+ // If there is a whiteout, then the file was removed
+ if strings.HasPrefix(file, WhiteoutPrefix) {
+ originalFile := file[len(WhiteoutPrefix):]
+ change.Path = filepath.Join(filepath.Dir(path), originalFile)
+ change.Kind = ChangeDelete
+ } else {
+ // Otherwise, the file was added
+ change.Kind = ChangeAdd
+
+ // ...Unless it already existed in a top layer, in which case, it's a modification
+ for _, layer := range layers {
+ stat, err := os.Stat(filepath.Join(layer, path))
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err == nil {
+ // The file existed in the top layer, so that's a modification
+
+ // However, if it's a directory, maybe it wasn't actually modified.
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
+ if stat.IsDir() && f.IsDir() {
+ if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+ // Both directories are the same, don't record the change
+ return nil
+ }
+ }
+ change.Kind = ChangeModify
+ break
+ }
+ }
+ }
+
+ // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
+ // This block is here to ensure the change is recorded even if the
+ // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
+ // Check https://github.com/docker/docker/pull/13590 for details.
+ if f.IsDir() {
+ changedDirs[path] = struct{}{}
+ }
+ if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
+ parent := filepath.Dir(path)
+ if _, ok := changedDirs[parent]; !ok && parent != "/" {
+ changes = append(changes, Change{Path: parent, Kind: ChangeModify})
+ changedDirs[parent] = struct{}{}
+ }
+ }
+
+ // Record change
+ changes = append(changes, change)
+ return nil
+ })
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ return changes, nil
+}
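+
+// A minimal sketch of calling Changes directly; the layer and rw paths below
+// are hypothetical driver directories, not values used elsewhere in this package:
+//
+//    layers := []string{"/var/lib/docker/aufs/diff/base"}
+//    changes, err := Changes(layers, "/var/lib/docker/aufs/diff/container-rw")
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    for _, c := range changes {
+//        fmt.Println(c.String()) // e.g. "A /etc/hostname" or "D /tmp/old"
+//    }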
+
+// FileInfo describes the information of a file.
+type FileInfo struct {
+ parent *FileInfo
+ name string
+ stat *system.StatT
+ children map[string]*FileInfo
+ capability []byte
+ added bool
+}
+
+// LookUp looks up the file information of a file.
+func (info *FileInfo) LookUp(path string) *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ parent := info
+ if path == string(os.PathSeparator) {
+ return info
+ }
+
+ pathElements := strings.Split(path, string(os.PathSeparator))
+ for _, elem := range pathElements {
+ if elem != "" {
+ child := parent.children[elem]
+ if child == nil {
+ return nil
+ }
+ parent = child
+ }
+ }
+ return parent
+}
+
+func (info *FileInfo) path() string {
+ if info.parent == nil {
+ // As this runs on the daemon side, file paths are OS specific.
+ return string(os.PathSeparator)
+ }
+ return filepath.Join(info.parent.path(), info.name)
+}
+
+func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
+
+ sizeAtEntry := len(*changes)
+
+ if oldInfo == nil {
+ // add
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeAdd,
+ }
+ *changes = append(*changes, change)
+ info.added = true
+ }
+
+ // We make a copy so we can modify it to detect additions
+ // also, we only recurse on the old dir if the new info is a directory
+ // otherwise any previous delete/change is considered recursive
+ oldChildren := make(map[string]*FileInfo)
+ if oldInfo != nil && info.isDir() {
+ for k, v := range oldInfo.children {
+ oldChildren[k] = v
+ }
+ }
+
+ for name, newChild := range info.children {
+ oldChild, _ := oldChildren[name]
+ if oldChild != nil {
+ // change?
+ oldStat := oldChild.stat
+ newStat := newChild.stat
+ // Note: We can't compare inode or ctime or blocksize here, because these change
+ // when copying a file into a container. However, that is not generally a problem
+ // because any content change will change mtime, and any status change should
+ // be visible when actually comparing the stat fields. The only time this
+ // breaks down is if some code intentionally hides a change by setting
+ // back mtime
+ if statDifferent(oldStat, newStat) ||
+ bytes.Compare(oldChild.capability, newChild.capability) != 0 {
+ change := Change{
+ Path: newChild.path(),
+ Kind: ChangeModify,
+ }
+ *changes = append(*changes, change)
+ newChild.added = true
+ }
+
+ // Remove from copy so we can detect deletions
+ delete(oldChildren, name)
+ }
+
+ newChild.addChanges(oldChild, changes)
+ }
+ for _, oldChild := range oldChildren {
+ // delete
+ change := Change{
+ Path: oldChild.path(),
+ Kind: ChangeDelete,
+ }
+ *changes = append(*changes, change)
+ }
+
+ // If there were changes inside this directory, we need to add it, even if the directory
+ // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
+ // As this runs on the daemon side, file paths are OS specific.
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeModify,
+ }
+ // Let's insert the directory entry before the recently added entries located inside this dir
+ *changes = append(*changes, change) // just to resize the slice, will be overwritten
+ copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
+ (*changes)[sizeAtEntry] = change
+ }
+
+}
+
+// Changes compares this FileInfo tree against oldInfo and returns the resulting list of changes.
+func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
+ var changes []Change
+
+ info.addChanges(oldInfo, &changes)
+
+ return changes
+}
+
+func newRootFileInfo() *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ root := &FileInfo{
+ name: string(os.PathSeparator),
+ children: make(map[string]*FileInfo),
+ }
+ return root
+}
+
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
+// If oldDir is "", then all files in newDir will be Add-Changes.
+func ChangesDirs(newDir, oldDir string) ([]Change, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ )
+ if oldDir == "" {
+ emptyDir, err := ioutil.TempDir("", "empty")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(emptyDir)
+ oldDir = emptyDir
+ }
+ oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
+ if err != nil {
+ return nil, err
+ }
+
+ return newRoot.Changes(oldRoot), nil
+}
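+
+// Illustrative call comparing two directories; the path is hypothetical, and
+// with an empty oldDir every file under newDir is reported as an Add:
+//
+//    changes, err := ChangesDirs("/tmp/layer-new", "")
+//    if err != nil {
+//        return err
+//    }
+//    fmt.Printf("%d changes\n", len(changes))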
+
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
+func ChangesSize(newDir string, changes []Change) int64 {
+ var (
+ size int64
+ sf = make(map[uint64]struct{})
+ )
+ for _, change := range changes {
+ if change.Kind == ChangeModify || change.Kind == ChangeAdd {
+ file := filepath.Join(newDir, change.Path)
+ fileInfo, err := os.Lstat(file)
+ if err != nil {
+ logrus.Errorf("Can not stat %q: %s", file, err)
+ continue
+ }
+
+ if fileInfo != nil && !fileInfo.IsDir() {
+ if hasHardlinks(fileInfo) {
+ inode := getIno(fileInfo)
+ if _, ok := sf[inode]; !ok {
+ size += fileInfo.Size()
+ sf[inode] = struct{}{}
+ }
+ } else {
+ size += fileInfo.Size()
+ }
+ }
+ }
+ }
+ return size
+}
+
+// ExportChanges produces an Archive from the provided changes, relative to dir.
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
+ reader, writer := io.Pipe()
+ go func() {
+ ta := &tarAppender{
+ TarWriter: tar.NewWriter(writer),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ SeenFiles: make(map[uint64]string),
+ UIDMaps: uidMaps,
+ GIDMaps: gidMaps,
+ }
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ sort.Sort(changesByPath(changes))
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+ for _, change := range changes {
+ if change.Kind == ChangeDelete {
+ whiteOutDir := filepath.Dir(change.Path)
+ whiteOutBase := filepath.Base(change.Path)
+ whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
+ timestamp := time.Now()
+ hdr := &tar.Header{
+ Name: whiteOut[1:],
+ Size: 0,
+ ModTime: timestamp,
+ AccessTime: timestamp,
+ ChangeTime: timestamp,
+ }
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ logrus.Debugf("Can't write whiteout header: %s", err)
+ }
+ } else {
+ path := filepath.Join(dir, change.Path)
+ if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+ logrus.Debugf("Can't add file %s to tar: %s", path, err)
+ }
+ }
+ }
+
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Debugf("Can't close layer: %s", err)
+ }
+ if err := writer.Close(); err != nil {
+ logrus.Debugf("failed close Changes writer: %s", err)
+ }
+ }()
+ return reader, nil
+}
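+
+// Sketch of turning a change list into a layer tarball on disk; the output path
+// is hypothetical and nil ID maps mean no uid/gid remapping:
+//
+//    layer, err := ExportChanges("/tmp/layer-new", changes, nil, nil)
+//    if err != nil {
+//        return err
+//    }
+//    defer layer.Close()
+//    f, err := os.Create("/tmp/layer.tar")
+//    if err != nil {
+//        return err
+//    }
+//    defer f.Close()
+//    _, err = io.Copy(f, layer)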
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
new file mode 100644
index 000000000..dee8b7c60
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
@@ -0,0 +1,285 @@
+package archive
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "syscall"
+ "unsafe"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save up to seconds on large
+// images.
+type walker struct {
+ dir1 string
+ dir2 string
+ root1 *FileInfo
+ root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// for generating a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+ w := &walker{
+ dir1: dir1,
+ dir2: dir2,
+ root1: newRootFileInfo(),
+ root2: newRootFileInfo(),
+ }
+
+ i1, err := os.Lstat(w.dir1)
+ if err != nil {
+ return nil, nil, err
+ }
+ i2, err := os.Lstat(w.dir2)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if err := w.walk("/", i1, i2); err != nil {
+ return nil, nil, err
+ }
+
+ return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
+ if fi == nil {
+ return nil
+ }
+ parent := root.LookUp(filepath.Dir(path))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path)
+ }
+ info := &FileInfo{
+ name: filepath.Base(path),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+ cpath := filepath.Join(dir, path)
+ stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
+ if err != nil {
+ return err
+ }
+ info.stat = stat
+ info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+ parent.children[info.name] = info
+ return nil
+}
+
+// Walk a subtree rooted at the same path in both trees being iterated. For
+// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
+func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
+ // Register these nodes with the return trees, unless we're still at the
+ // (already-created) roots:
+ if path != "/" {
+ if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
+ return err
+ }
+ if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
+ return err
+ }
+ }
+
+ is1Dir := i1 != nil && i1.IsDir()
+ is2Dir := i2 != nil && i2.IsDir()
+
+ sameDevice := false
+ if i1 != nil && i2 != nil {
+ si1 := i1.Sys().(*syscall.Stat_t)
+ si2 := i2.Sys().(*syscall.Stat_t)
+ if si1.Dev == si2.Dev {
+ sameDevice = true
+ }
+ }
+
+ // If these files are both non-existent, or leaves (non-dirs), we are done.
+ if !is1Dir && !is2Dir {
+ return nil
+ }
+
+ // Fetch the names of all the files contained in both directories being walked:
+ var names1, names2 []nameIno
+ if is1Dir {
+ names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+ if is2Dir {
+ names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+
+ // We have lists of the files contained in both parallel directories, sorted
+ // in the same order. Walk them in parallel, generating a unique merged list
+ // of all items present in either or both directories.
+ var names []string
+ ix1 := 0
+ ix2 := 0
+
+ for {
+ if ix1 >= len(names1) {
+ break
+ }
+ if ix2 >= len(names2) {
+ break
+ }
+
+ ni1 := names1[ix1]
+ ni2 := names2[ix2]
+
+ switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
+ case -1: // ni1 < ni2 -- advance ni1
+ // we will not encounter ni1 in names2
+ names = append(names, ni1.name)
+ ix1++
+ case 0: // ni1 == ni2
+ if ni1.ino != ni2.ino || !sameDevice {
+ names = append(names, ni1.name)
+ }
+ ix1++
+ ix2++
+ case 1: // ni1 > ni2 -- advance ni2
+ // we will not encounter ni2 in names1
+ names = append(names, ni2.name)
+ ix2++
+ }
+ }
+ for ix1 < len(names1) {
+ names = append(names, names1[ix1].name)
+ ix1++
+ }
+ for ix2 < len(names2) {
+ names = append(names, names2[ix2].name)
+ ix2++
+ }
+
+ // For each of the names present in either or both of the directories being
+ // iterated, stat the name under each root, and recurse the pair of them:
+ for _, name := range names {
+ fname := filepath.Join(path, name)
+ var cInfo1, cInfo2 os.FileInfo
+ if is1Dir {
+ cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if is2Dir {
+ cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if err = w.walk(fname, cInfo1, cInfo2); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// {name,inode} pairs used to support the early-pruning logic of the walker type
+type nameIno struct {
+ name string
+ ino uint64
+}
+
+type nameInoSlice []nameIno
+
+func (s nameInoSlice) Len() int { return len(s) }
+func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
+
+// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
+// numbers further up the stack when reading directory contents. Unlike
+// os.Readdirnames, which returns a list of filenames, this function returns a
+// list of {filename,inode} pairs.
+func readdirnames(dirname string) (names []nameIno, err error) {
+ var (
+ size = 100
+ buf = make([]byte, 4096)
+ nbuf int
+ bufp int
+ nb int
+ )
+
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ names = make([]nameIno, 0, size) // Empty with room to grow.
+ for {
+ // Refill the buffer if necessary
+ if bufp >= nbuf {
+ bufp = 0
+ nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux
+ if nbuf < 0 {
+ nbuf = 0
+ }
+ if err != nil {
+ return nil, os.NewSyscallError("readdirent", err)
+ }
+ if nbuf <= 0 {
+ break // EOF
+ }
+ }
+
+ // Drain the buffer
+ nb, names = parseDirent(buf[bufp:nbuf], names)
+ bufp += nb
+ }
+
+ sl := nameInoSlice(names)
+ sort.Sort(sl)
+ return sl, nil
+}
+
+// parseDirent is a minor modification of syscall.ParseDirent (linux version)
+// which returns {name,inode} pairs instead of just names.
+func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
+ origlen := len(buf)
+ for len(buf) > 0 {
+ dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:clen(bytes[:])])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ names = append(names, nameIno{name, dirent.Ino})
+ }
+ return origlen - len(buf), names
+}
+
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
new file mode 100644
index 000000000..da70ed37c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
@@ -0,0 +1,97 @@
+// +build !linux
+
+package archive
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ err1, err2 error
+ errs = make(chan error, 2)
+ )
+ go func() {
+ oldRoot, err1 = collectFileInfo(oldDir)
+ errs <- err1
+ }()
+ go func() {
+ newRoot, err2 = collectFileInfo(newDir)
+ errs <- err2
+ }()
+
+ // block until both routines have returned
+ for i := 0; i < 2; i++ {
+ if err := <-errs; err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return oldRoot, newRoot, nil
+}
+
+func collectFileInfo(sourceDir string) (*FileInfo, error) {
+ root := newRootFileInfo()
+
+ err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(sourceDir, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ relPath = filepath.Join(string(os.PathSeparator), relPath)
+
+ // See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
+ // Temporary workaround. If the returned path starts with two backslashes,
+ // trim it down to a single backslash. Only relevant on Windows.
+ if runtime.GOOS == "windows" {
+ if strings.HasPrefix(relPath, `\\`) {
+ relPath = relPath[1:]
+ }
+ }
+
+ if relPath == string(os.PathSeparator) {
+ return nil
+ }
+
+ parent := root.LookUp(filepath.Dir(relPath))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+ }
+
+ info := &FileInfo{
+ name: filepath.Base(relPath),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+
+ s, err := system.Lstat(path)
+ if err != nil {
+ return err
+ }
+ info.stat = s
+
+ info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+ parent.children[info.name] = info
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return root, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
new file mode 100644
index 000000000..3778b732c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
@@ -0,0 +1,36 @@
+// +build !windows
+
+package archive
+
+import (
+ "os"
+ "syscall"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+ // Don't look at size for dirs; it's not a good measure of change
+ if oldStat.Mode() != newStat.Mode() ||
+ oldStat.UID() != newStat.UID() ||
+ oldStat.GID() != newStat.GID() ||
+ oldStat.Rdev() != newStat.Rdev() ||
+ // Don't look at size for dirs; it's not a good measure of change
+ (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
+ (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+ return uint64(fi.Sys().(*syscall.Stat_t).Ino)
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
new file mode 100644
index 000000000..af94243fc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
@@ -0,0 +1,30 @@
+package archive
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+
+ // Don't look at size for dirs; it's not a good measure of change
+ if oldStat.ModTime() != newStat.ModTime() ||
+ oldStat.Mode() != newStat.Mode() ||
+ oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+ return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go
new file mode 100644
index 000000000..e1fa73f37
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy.go
@@ -0,0 +1,458 @@
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/system"
+)
+
+// Errors used or returned by this file.
+var (
+ ErrNotDirectory = errors.New("not a directory")
+ ErrDirNotExists = errors.New("no such directory")
+ ErrCannotCopyDir = errors.New("cannot copy directory")
+ ErrInvalidCopySource = errors.New("invalid copy source content")
+)
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path (after
+// processing using any utility functions from the path or filepath stdlib
+// packages) and appends a trailing `/.` or `/` if its corresponding original
+// path (from before being processed by utility functions from the path or
+// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
+// path already ends in a `.` path segment, then another is not added. If the
+// clean path already ends in a path separator, then another is not added.
+func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
+ // Ensure paths are in platform semantics
+ cleanedPath = normalizePath(cleanedPath)
+ originalPath = normalizePath(originalPath)
+
+ if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
+ if !hasTrailingPathSeparator(cleanedPath) {
+ // Add a separator if it doesn't already end with one (a cleaned
+ // path would only end in a separator if it is the root).
+ cleanedPath += string(filepath.Separator)
+ }
+ cleanedPath += "."
+ }
+
+ if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
+ cleanedPath += string(filepath.Separator)
+ }
+
+ return cleanedPath
+}
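+
+// Worked examples (Unix separators, arbitrary paths):
+//
+//    PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar/.") // "/foo/bar/."
+//    PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar/")  // "/foo/bar/"
+//    PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar")   // "/foo/bar"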
+
+// assertsDirectory returns whether the given path is
+// asserted to be a directory, i.e., the path ends with
+// a trailing '/' or `/.`, assuming a path separator of `/`.
+func assertsDirectory(path string) bool {
+ return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
+}
+
+// hasTrailingPathSeparator returns whether the given
+// path ends with the system's path separator character.
+func hasTrailingPathSeparator(path string) bool {
+ return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
+}
+
+// specifiesCurrentDir returns whether the given path specifies
+// a "current directory", i.e., the last path segment is `.`.
+func specifiesCurrentDir(path string) bool {
+ return filepath.Base(path) == "."
+}
+
+// SplitPathDirEntry splits the given path between its directory name and its
+// basename by first cleaning the path but preserves a trailing "." if the
+// original path specified the current directory.
+func SplitPathDirEntry(path string) (dir, base string) {
+ cleanedPath := filepath.Clean(normalizePath(path))
+
+ if specifiesCurrentDir(path) {
+ cleanedPath += string(filepath.Separator) + "."
+ }
+
+ return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
+}
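+
+// Worked examples (Unix separators, arbitrary paths):
+//
+//    SplitPathDirEntry("/foo/bar")   // dir "/foo",     base "bar"
+//    SplitPathDirEntry("/foo/bar/")  // dir "/foo",     base "bar"
+//    SplitPathDirEntry("/foo/bar/.") // dir "/foo/bar", base "."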
+
+// TarResource archives the resource described by the given CopyInfo to a Tar
+// archive. A non-nil error is returned if sourcePath does not exist or is
+// asserted to be a directory but exists as another type of file.
+//
+// This function acts as a convenient wrapper around TarWithOptions, which
+// requires a directory as the source path. TarResource accepts either a
+// directory or a file path and correctly sets the Tar options.
+func TarResource(sourceInfo CopyInfo) (content Archive, err error) {
+ return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
+ sourcePath = normalizePath(sourcePath)
+ if _, err = os.Lstat(sourcePath); err != nil {
+ // Catches the case where the source does not exist or is not a
+ // directory if asserted to be a directory, as this also causes an
+ // error.
+ return
+ }
+
+ // Separate the source path between its directory and
+ // the entry in that directory which we are archiving.
+ sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+
+ filter := []string{sourceBase}
+
+ logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+
+ return TarWithOptions(sourceDir, &TarOptions{
+ Compression: Uncompressed,
+ IncludeFiles: filter,
+ IncludeSourceDir: true,
+ RebaseNames: map[string]string{
+ sourceBase: rebaseName,
+ },
+ })
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+ Path string
+ Exists bool
+ IsDir bool
+ RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+ // Normalize the file path and then evaluate the symlink.
+ // We will use the target file instead of the symlink if
+ // followLink is set.
+ path = normalizePath(path)
+
+ resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ stat, err := os.Lstat(resolvedPath)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ return CopyInfo{
+ Path: resolvedPath,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ RebaseName: rebaseName,
+ }, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+ maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+ path = normalizePath(path)
+ originalPath := path
+
+ stat, err := os.Lstat(path)
+
+ if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+ // The path exists and is not a symlink.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+ }
+
+ // While the path is a symlink.
+ for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+ if n > maxSymlinkIter {
+ // Don't follow symlinks more than this arbitrary number of times.
+ return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+ }
+
+ // The path is a symbolic link. We need to evaluate it so that the
+ // destination of the copy operation is the link target and not the
+ // link itself. This is notably different than CopyInfoSourcePath which
+ // only evaluates symlinks before the last appearing path separator.
+ // Also note that it is okay if the last path element is a broken
+ // symlink as the copy operation should create the target.
+ var linkTarget string
+
+ linkTarget, err = os.Readlink(path)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ if !system.IsAbs(linkTarget) {
+ // Join with the parent directory.
+ dstParent, _ := SplitPathDirEntry(path)
+ linkTarget = filepath.Join(dstParent, linkTarget)
+ }
+
+ path = linkTarget
+ stat, err = os.Lstat(path)
+ }
+
+ if err != nil {
+ // It's okay if the destination path doesn't exist. We can still
+ // continue the copy operation if the parent directory exists.
+ if !os.IsNotExist(err) {
+ return CopyInfo{}, err
+ }
+
+ // Ensure destination parent dir exists.
+ dstParent, _ := SplitPathDirEntry(path)
+
+ parentDirStat, err := os.Lstat(dstParent)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+ if !parentDirStat.IsDir() {
+ return CopyInfo{}, ErrNotDirectory
+ }
+
+ return CopyInfo{Path: path}, nil
+ }
+
+ // The path exists after resolving symlinks.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+}
+
+// PrepareArchiveCopy prepares the given srcContent archive, which should
+// contain the archived resource described by srcInfo, to the destination
+// described by dstInfo. Returns the possibly modified content archive along
+// with the path to the destination directory which it should be extracted to.
+func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
+ // Ensure in platform semantics
+ srcInfo.Path = normalizePath(srcInfo.Path)
+ dstInfo.Path = normalizePath(dstInfo.Path)
+
+ // Separate the destination path between its directory and base
+ // components in case the source archive contents need to be rebased.
+ dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
+ _, srcBase := SplitPathDirEntry(srcInfo.Path)
+
+ switch {
+ case dstInfo.Exists && dstInfo.IsDir:
+ // The destination exists as a directory. No alteration
+ // to srcContent is needed as its contents can be
+ // simply extracted to the destination directory.
+ return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+ case dstInfo.Exists && srcInfo.IsDir:
+ // The destination exists as some type of file and the source
+ // content is a directory. This is an error condition since
+ // you cannot copy a directory to an existing file location.
+ return "", nil, ErrCannotCopyDir
+ case dstInfo.Exists:
+ // The destination exists as some type of file and the source content
+ // is also a file. The source content entry will have to be renamed to
+ // have a basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case srcInfo.IsDir:
+ // The destination does not exist and the source content is an archive
+ // of a directory. The archive should be extracted to the parent of
+ // the destination path instead, and when it is, the directory that is
+ // created as a result should take the name of the destination path.
+ // The source content entries will have to be renamed to have a
+ // basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case assertsDirectory(dstInfo.Path):
+ // The destination does not exist and is asserted to be created as a
+ // directory, but the source content is not a directory. This is an
+ // error condition since you cannot create a directory from a file
+ // source.
+ return "", nil, ErrDirNotExists
+ default:
+ // The last remaining case is when the destination does not exist, is
+ // not asserted to be a directory, and the source content is not an
+ // archive of a directory. In this case, the destination file will need
+ // to be created when the archive is extracted and the source content
+ // entry will have to be renamed to have a basename which matches the
+ // destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ }
+
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
+ if oldBase == string(os.PathSeparator) {
+ // If oldBase specifies the root directory, use an empty string as
+ // oldBase instead so that newBase doesn't replace the path separator
+ // that all paths will start with.
+ oldBase = ""
+ }
+
+ rebased, w := io.Pipe()
+
+ go func() {
+ srcTar := tar.NewReader(srcContent)
+ rebasedTar := tar.NewWriter(w)
+
+ for {
+ hdr, err := srcTar.Next()
+ if err == io.EOF {
+ // Signals end of archive.
+ rebasedTar.Close()
+ w.Close()
+ return
+ }
+ if err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+
+ if err = rebasedTar.WriteHeader(hdr); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+ }
+ }()
+
+ return rebased
+}
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+ var (
+ srcInfo CopyInfo
+ err error
+ )
+
+ // Ensure in platform semantics
+ srcPath = normalizePath(srcPath)
+ dstPath = normalizePath(dstPath)
+
+ // Clean the source and destination paths.
+ srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
+ dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
+
+ if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+ return err
+ }
+
+ content, err := TarResource(srcInfo)
+ if err != nil {
+ return err
+ }
+ defer content.Close()
+
+ return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
+ // The destination path need not exist, but CopyInfoDestinationPath will
+ // ensure that at least the parent directory exists.
+ dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+ if err != nil {
+ return err
+ }
+
+ dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+ if err != nil {
+ return err
+ }
+ defer copyArchive.Close()
+
+ options := &TarOptions{
+ NoLchown: true,
+ NoOverwriteDirNonDir: true,
+ }
+
+ return Untar(copyArchive, dstDir, options)
+}
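+
+// Putting the pieces together, a sketch of copying one host path into another
+// through the archive pipeline (both paths are hypothetical):
+//
+//    // Copy the contents of the source directory, following symlinks.
+//    if err := CopyResource("/tmp/src/config/.", "/tmp/dst/config", true); err != nil {
+//        log.Fatal(err)
+//    }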
+
+// ResolveHostSourcePath decides which real path needs to be copied, based on
+// whether symlinks should be followed. If followLink is true, resolvedPath is the
+// link target of any symlink file; otherwise only the symlink of the parent
+// directory is resolved, and the symlink file itself is returned unresolved.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+ if followLink {
+ resolvedPath, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return
+ }
+
+ resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+ } else {
+ dirPath, basePath := filepath.Split(path)
+
+ // If not following the symlink, resolve only the symlink of the parent dir.
+ var resolvedDirPath string
+ resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+ if err != nil {
+ return
+ }
+ // resolvedDirPath will have been cleaned (no trailing path separators) so
+ // we can manually join it with the base path element.
+ resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+ if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
+ rebaseName = filepath.Base(path)
+ }
+ }
+ return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath,
+// returning the completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+ // resolvedPath will have been cleaned (no trailing path separator or dot), so
+ // we restore them manually before comparing.
+ var rebaseName string
+ if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
+ resolvedPath += string(filepath.Separator) + "."
+ }
+
+ if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
+ resolvedPath += string(filepath.Separator)
+ }
+
+ if filepath.Base(path) != filepath.Base(resolvedPath) {
+ // In the case where the path had a trailing separator and a symlink
+ // evaluation has changed the last path component, we will need to
+ // rebase the name in the archive that is being copied to match the
+ // originally requested name.
+ rebaseName = filepath.Base(path)
+ }
+ return resolvedPath, rebaseName
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
new file mode 100644
index 000000000..e305b5e4a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package archive
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.ToSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
new file mode 100644
index 000000000..2b775b45c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
@@ -0,0 +1,9 @@
+package archive
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.FromSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go
new file mode 100644
index 000000000..1b08ad33a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/diff.go
@@ -0,0 +1,279 @@
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+)
+
+// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
+ tr := tar.NewReader(layer)
+ trBuf := pools.BufioReader32KPool.Get(tr)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ unpackedPaths := make(map[string]struct{})
+
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+ remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+ if err != nil {
+ return 0, err
+ }
+
+ aufsTempdir := ""
+ aufsHardlinks := make(map[string]*tar.Header)
+
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ size += hdr.Size
+
+ // Normalize name, for safety and for a simple is-root check
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ // Windows does not support filenames with colons in them. Ignore
+ // these files. This is not a problem though (although it might
+ // appear that it is). Let's suppose a client is running docker pull.
+ // The daemon it points to is Windows. Would it make sense for the
+ // client to be doing a docker pull Ubuntu for example (which has files
+ // with colons in the name under /usr/share/man/man3)? No, absolutely
+ // not as it would really only make sense that they were pulling a
+ // Windows image. However, for development, it is necessary to be able
+ // to pull Linux images which are in the repository.
+ //
+ // TODO Windows. Once the registry is aware of what images are Windows-
+ // specific or Linux-specific, this warning should be changed to an error
+ // to cater for the situation where someone does manage to upload a Linux
+ // image but have it tagged as Windows inadvertently.
+ if runtime.GOOS == "windows" {
+ if strings.Contains(hdr.Name, ":") {
+ logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+ continue
+ }
+ }
+
+ // Note: as these operations are platform-specific, so must the slash be.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists.
+ // This happened in some tests where an image had a tarfile without any
+ // parent directories.
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = system.MkdirAll(parentPath, 0600)
+ if err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ // Skip AUFS metadata dirs
+ if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+ // Regular files inside /.wh..wh.plnk can be used as hardlink targets
+ // We don't want this directory, but we need the files in them so that
+ // such hardlinks can be resolved.
+ if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+ basename := filepath.Base(hdr.Name)
+ aufsHardlinks[basename] = hdr
+ if aufsTempdir == "" {
+ if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+ return 0, err
+ }
+ defer os.RemoveAll(aufsTempdir)
+ }
+ if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil {
+ return 0, err
+ }
+ }
+
+ if hdr.Name != WhiteoutOpaqueDir {
+ continue
+ }
+ }
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return 0, err
+ }
+
+ // Note: as these operations are platform-specific, so must the slash be.
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+ base := filepath.Base(path)
+
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ dir := filepath.Dir(path)
+ if base == WhiteoutOpaqueDir {
+ _, err := os.Lstat(dir)
+ if err != nil {
+ return 0, err
+ }
+ err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = nil // parent was deleted
+ }
+ return err
+ }
+ if path == dir {
+ return nil
+ }
+ if _, exists := unpackedPaths[path]; !exists {
+ err := os.RemoveAll(path)
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+ if err := os.RemoveAll(originalPath); err != nil {
+ return 0, err
+ }
+ }
+ } else {
+ // If the path exists we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ trBuf.Reset(tr)
+ srcData := io.Reader(trBuf)
+ srcHdr := hdr
+
+ // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+ // we manually retarget these into the temporary files we extracted them into
+ if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
+ linkBasename := filepath.Base(hdr.Linkname)
+ srcHdr = aufsHardlinks[linkBasename]
+ if srcHdr == nil {
+ return 0, fmt.Errorf("Invalid aufs hardlink")
+ }
+ tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+ if err != nil {
+ return 0, err
+ }
+ defer tmpFile.Close()
+ srcData = tmpFile
+ }
+
+ // if the options contain a uid & gid maps, convert header uid/gid
+ // entries using the maps such that lchown sets the proper mapped
+ // uid/gid after writing the file. We only perform this mapping if
+ // the file isn't already owned by the remapped root UID or GID, as
+ // that specific uid/gid has no mapping from container -> host, and
+ // those files already have the proper ownership for inside the
+ // container.
+ if srcHdr.Uid != remappedRootUID {
+ xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
+ if err != nil {
+ return 0, err
+ }
+ srcHdr.Uid = xUID
+ }
+ if srcHdr.Gid != remappedRootGID {
+ xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
+ if err != nil {
+ return 0, err
+ }
+ srcHdr.Gid = xGID
+ }
+ if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
+ return 0, err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them to modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ unpackedPaths[path] = struct{}{}
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return 0, err
+ }
+ }
+
+ return size, nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer Reader) (int64, error) {
+ return applyLayerHandler(dest, layer, &TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) {
+ return applyLayerHandler(dest, layer, options, false)
+}
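+
+// Typical layer application, sketched with a hypothetical tarball on disk; the
+// stream may be gzip-compressed since ApplyLayer decompresses it:
+//
+//    f, err := os.Open("/tmp/layer.tar.gz")
+//    if err != nil {
+//        return err
+//    }
+//    defer f.Close()
+//    size, err := ApplyLayer("/var/lib/docker/overlay/abc/root", f)
+//    if err != nil {
+//        return err
+//    }
+//    logrus.Debugf("applied layer of %d bytes", size)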
+
+// applyLayerHandler does the bulk of the work for ApplyLayer, but allows for not calling DecompressStream.
+func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) {
+ dest = filepath.Clean(dest)
+
+ // We need to be able to set any perms
+ oldmask, err := system.Umask(0)
+ if err != nil {
+ return 0, err
+ }
+ defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
+
+ if decompress {
+ layer, err = DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ }
+ return UnpackLayer(dest, layer, options)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
new file mode 100644
index 000000000..cedd46a40
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
@@ -0,0 +1,97 @@
+// +build ignore
+
+// Simple tool to create an archive stream from an old and new directory
+//
+// By default it will stream the comparison of two temporary directories with junk files
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/archive"
+)
+
+var (
+ flDebug = flag.Bool("D", false, "debugging output")
+ flNewDir = flag.String("newdir", "", "")
+ flOldDir = flag.String("olddir", "", "")
+ log = logrus.New()
+)
+
+func main() {
+ flag.Usage = func() {
+ fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
+ fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+ flag.PrintDefaults()
+ }
+ flag.Parse()
+ log.Out = os.Stderr
+ if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+ logrus.SetLevel(logrus.DebugLevel)
+ }
+ var newDir, oldDir string
+
+ if len(*flNewDir) == 0 {
+ var err error
+ newDir, err = ioutil.TempDir("", "docker-test-newDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(newDir)
+ if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+ log.Fatal(err)
+ }
+ } else {
+ newDir = *flNewDir
+ }
+
+ if len(*flOldDir) == 0 {
+ var err error
+ oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(oldDir)
+ } else {
+ oldDir = *flOldDir
+ }
+
+ changes, err := archive.ChangesDirs(newDir, oldDir)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ a, err := archive.ExportChanges(newDir, changes, nil, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer a.Close()
+
+ i, err := io.Copy(os.Stdout, a)
+ if err != nil && err != io.EOF {
+ log.Fatal(err)
+ }
+ fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+ fileData := []byte("fooo")
+ for n := 0; n < numberOfFiles; n++ {
+ fileName := fmt.Sprintf("file-%d", n)
+ if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+ return 0, err
+ }
+ if makeLinks {
+ if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+ return 0, err
+ }
+ }
+ }
+ totalSize := numberOfFiles * len(fileData)
+ return totalSize, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
new file mode 100644
index 000000000..3448569b1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
@@ -0,0 +1,16 @@
+package archive
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ if time.IsZero() {
+ // Return UTIME_OMIT special value
+ ts.Sec = 0
+ ts.Nsec = ((1 << 30) - 2)
+ return
+ }
+ return syscall.NsecToTimespec(time.UnixNano())
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
new file mode 100644
index 000000000..e85aac054
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package archive
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ nsec := int64(0)
+ if !time.IsZero() {
+ nsec = time.UnixNano()
+ }
+ return syscall.NsecToTimespec(nsec)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
new file mode 100644
index 000000000..d20478a10
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
@@ -0,0 +1,23 @@
+package archive
+
+// Whiteouts are files with a special meaning for the layered filesystem.
+// Docker uses AUFS whiteout files inside exported archives. In other
+// filesystems these files are generated/handled on tar creation/extraction.
+
+// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
+// filename this means that file has been removed from the base layer.
+const WhiteoutPrefix = ".wh."
+
+// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
+// for removing an actual file. Normally these files are excluded from exported
+// archives.
+const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
+
+// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
+// layers. Normally these should not go into exported archives and all changed
+// hardlinks should be copied to the top layer.
+const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
+
+// WhiteoutOpaqueDir file means directory has been made opaque - meaning
+// readdir calls to this directory do not follow to lower layers.
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
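+
+// For illustration (example paths only): in an exported layer, a file /foo/bar
+// deleted in the top layer appears as the tar entry "foo/.wh.bar", a directory
+// /foo made opaque carries "foo/.wh..wh..opq", and AUFS hardlink bookkeeping
+// lives under ".wh..wh.plnk/".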
diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go
new file mode 100644
index 000000000..dfb335c0b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "io/ioutil"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with an
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// * ./foo.txt with content "hello world"
+// * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (Archive, error) {
+ files := parseStringPairs(input...)
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, file := range files {
+ name, content := file[0], file[1]
+ hdr := &tar.Header{
+ Name: name,
+ Size: int64(len(content)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return nil, err
+ }
+ if _, err := tw.Write([]byte(content)); err != nil {
+ return nil, err
+ }
+ }
+ if err := tw.Close(); err != nil {
+ return nil, err
+ }
+ return ioutil.NopCloser(buf), nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+ output = make([][2]string, 0, len(input)/2+1)
+ for i := 0; i < len(input); i += 2 {
+ var pair [2]string
+ pair[0] = input[i]
+ if i+1 < len(input) {
+ pair[1] = input[i+1]
+ }
+ output = append(output, pair)
+ }
+ return
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go
new file mode 100644
index 000000000..763d8d279
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go
@@ -0,0 +1,283 @@
+package fileutils
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "text/scanner"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// exclusion returns true if the specified pattern is an exclusion
+func exclusion(pattern string) bool {
+ return pattern[0] == '!'
+}
+
+// empty returns true if the specified pattern is empty
+func empty(pattern string) bool {
+ return pattern == ""
+}
+
+// CleanPatterns takes a slice of patterns and returns a new
+// slice of patterns cleaned with filepath.Clean, stripped
+// of any empty patterns, and lets the caller know whether the
+// slice contains any exception patterns (prefixed with !).
+func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
+ // Loop over exclusion patterns and:
+ // 1. Clean them up.
+ // 2. Indicate whether we are dealing with any exception rules.
+ // 3. Error if we see a single exclusion marker on its own (!).
+ cleanedPatterns := []string{}
+ patternDirs := [][]string{}
+ exceptions := false
+ for _, pattern := range patterns {
+ // Eliminate leading and trailing whitespace.
+ pattern = strings.TrimSpace(pattern)
+ if empty(pattern) {
+ continue
+ }
+ if exclusion(pattern) {
+ if len(pattern) == 1 {
+ return nil, nil, false, errors.New("Illegal exclusion pattern: !")
+ }
+ exceptions = true
+ }
+ pattern = filepath.Clean(pattern)
+ cleanedPatterns = append(cleanedPatterns, pattern)
+ if exclusion(pattern) {
+ pattern = pattern[1:]
+ }
+ patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator)))
+ }
+
+ return cleanedPatterns, patternDirs, exceptions, nil
+}
+
+// Matches returns true if file matches any of the patterns
+// and isn't excluded by any of the subsequent patterns.
+func Matches(file string, patterns []string) (bool, error) {
+ file = filepath.Clean(file)
+
+ if file == "." {
+ // Don't let them exclude everything, kind of silly.
+ return false, nil
+ }
+
+ patterns, patDirs, _, err := CleanPatterns(patterns)
+ if err != nil {
+ return false, err
+ }
+
+ return OptimizedMatches(file, patterns, patDirs)
+}
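+
+// A minimal sketch of .dockerignore-style matching with this helper; the
+// patterns and file name are illustrative:
+//
+//    patterns := []string{"*.tmp", "build/", "!build/keep.txt"}
+//    skip, err := Matches("build/output.bin", patterns)
+//    if err != nil {
+//        return err
+//    }
+//    if skip {
+//        // omit the file from the build context
+//    }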
+
+// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
+// It will assume that the inputs have been preprocessed and therefore the function
+// doesn't need to do as much error checking and clean-up. This was done to avoid
+// repeating these steps on each file being checked during the archive process.
+// The more generic fileutils.Matches() can't make these assumptions.
+func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
+ matched := false
+ file = filepath.FromSlash(file)
+ parentPath := filepath.Dir(file)
+ parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
+
+ for i, pattern := range patterns {
+ negative := false
+
+ if exclusion(pattern) {
+ negative = true
+ pattern = pattern[1:]
+ }
+
+ match, err := regexpMatch(pattern, file)
+ if err != nil {
+ return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err)
+ }
+
+ if !match && parentPath != "." {
+ // Check to see if the pattern matches one of our parent dirs.
+ if len(patDirs[i]) <= len(parentPathDirs) {
+ match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)),
+ strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator)))
+ }
+ }
+
+ if match {
+ matched = !negative
+ }
+ }
+
+ if matched {
+ logrus.Debugf("Skipping excluded path: %s", file)
+ }
+
+ return matched, nil
+}
+
+// regexpMatch tries to match the logic of filepath.Match but
+// does so using regexp logic. We do this so that we can expand the
+// wildcard set to include other things, like "**" to mean any number
+// of directories. This means that we should be backwards compatible
+// with filepath.Match(). We'll end up supporting more stuff, due to
+// the fact that we're using regexp, but that's ok - it does no harm.
+//
+// As per the comment in Go's filepath.Match, on Windows, escaping
+// is disabled. Instead, '\\' is treated as path separator.
+func regexpMatch(pattern, path string) (bool, error) {
+ regStr := "^"
+
+ // Do some syntax checking on the pattern.
+ // filepath's Match() has some really weird rules that are inconsistent
+ // so instead of trying to dup their logic, just call Match() for its
+ // error state and if there is an error in the pattern return it.
+ // If this becomes an issue we can remove this since it's really only
+ // needed in the error (syntax) case - which isn't really critical.
+ if _, err := filepath.Match(pattern, path); err != nil {
+ return false, err
+ }
+
+ // Go through the pattern and convert it to a regexp.
+ // We use a scanner so we can support utf-8 chars.
+ var scan scanner.Scanner
+ scan.Init(strings.NewReader(pattern))
+
+ sl := string(os.PathSeparator)
+ escSL := sl
+ if sl == `\` {
+ escSL += `\`
+ }
+
+ for scan.Peek() != scanner.EOF {
+ ch := scan.Next()
+
+ if ch == '*' {
+ if scan.Peek() == '*' {
+ // is some flavor of "**"
+ scan.Next()
+
+ if scan.Peek() == scanner.EOF {
+ // is "**EOF" - to align with .gitignore just accept all
+ regStr += ".*"
+ } else {
+ // is "**"
+ regStr += "((.*" + escSL + ")|([^" + escSL + "]*))"
+ }
+
+ // Treat **/ as ** so eat the "/"
+ if string(scan.Peek()) == sl {
+ scan.Next()
+ }
+ } else {
+ // is "*" so map it to anything but "/"
+ regStr += "[^" + escSL + "]*"
+ }
+ } else if ch == '?' {
+ // "?" is any char except "/"
+ regStr += "[^" + escSL + "]"
+ } else if strings.Index(".$", string(ch)) != -1 {
+ // Escape some regexp special chars that have no meaning
+ // in golang's filepath.Match
+ regStr += `\` + string(ch)
+ } else if ch == '\\' {
+ // escape next char. Note that a trailing \ in the pattern
+ // will be left alone (but need to escape it)
+ if sl == `\` {
+ // On windows map "\" to "\\", meaning an escaped backslash,
+ // and then just continue because filepath.Match on
+ // Windows doesn't allow escaping at all
+ regStr += escSL
+ continue
+ }
+ if scan.Peek() != scanner.EOF {
+ regStr += `\` + string(scan.Next())
+ } else {
+ regStr += `\`
+ }
+ } else {
+ regStr += string(ch)
+ }
+ }
+
+ regStr += "$"
+
+ res, err := regexp.MatchString(regStr, path)
+
+ // Map regexp's error to filepath's so no one knows we're not using filepath
+ if err != nil {
+ err = filepath.ErrBadPattern
+ }
+
+ return res, err
+}
+
+// CopyFile copies from src to dst until either EOF is reached
+// on src or an error occurs. It verifies src exists and removes
+// the dst if it exists.
+func CopyFile(src, dst string) (int64, error) {
+ cleanSrc := filepath.Clean(src)
+ cleanDst := filepath.Clean(dst)
+ if cleanSrc == cleanDst {
+ return 0, nil
+ }
+ sf, err := os.Open(cleanSrc)
+ if err != nil {
+ return 0, err
+ }
+ defer sf.Close()
+ if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
+ return 0, err
+ }
+ df, err := os.Create(cleanDst)
+ if err != nil {
+ return 0, err
+ }
+ defer df.Close()
+ return io.Copy(df, sf)
+}
+
+// ReadSymlinkedDirectory returns the target directory of a symlink.
+// An error is returned if the target of the symbolic link is not a directory.
+func ReadSymlinkedDirectory(path string) (string, error) {
+ var realPath string
+ var err error
+ if realPath, err = filepath.Abs(path); err != nil {
+ return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
+ }
+ if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+ return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
+ }
+ realPathInfo, err := os.Stat(realPath)
+ if err != nil {
+ return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
+ }
+ if !realPathInfo.Mode().IsDir() {
+ return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
+ }
+ return realPath, nil
+}
+
+// CreateIfNotExists creates a file or a directory only if it does not already exist.
+func CreateIfNotExists(path string, isDir bool) error {
+ if _, err := os.Stat(path); err != nil {
+ if os.IsNotExist(err) {
+ if isDir {
+ return os.MkdirAll(path, 0755)
+ }
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ f, err := os.OpenFile(path, os.O_CREATE, 0755)
+ if err != nil {
+ return err
+ }
+ f.Close()
+ }
+ }
+ return nil
+}
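// Usage sketch (illustrative only; patterns and paths are made up): Matches
// reports whether a path is excluded by a .dockerignore-style pattern list,
// honouring "!" exceptions and the "**" wildcard handled by regexpMatch above.
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	patterns := []string{"vendor/**", "!vendor/github.com/hashicorp/**", "*.log"}

	for _, path := range []string{
		"vendor/github.com/docker/docker/README.md",
		"vendor/github.com/hashicorp/nomad/api/api.go",
		"main.go",
		"debug.log",
	} {
		excluded, err := fileutils.Matches(path, patterns)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-50s excluded=%v\n", path, excluded)
	}
}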
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go
new file mode 100644
index 000000000..0f2cb7ab9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go
@@ -0,0 +1,7 @@
+package fileutils
+
+// GetTotalUsedFds returns the number of used file descriptors.
+// On Solaris these limits are per process and not system-wide.
+func GetTotalUsedFds() int {
+ return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
new file mode 100644
index 000000000..d5c3abf56
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
@@ -0,0 +1,22 @@
+// +build linux freebsd
+
+package fileutils
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// GetTotalUsedFds returns the number of used file descriptors by
+// reading them via the /proc filesystem.
+func GetTotalUsedFds() int {
+ if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+ logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+ } else {
+ return len(fds)
+ }
+ return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
new file mode 100644
index 000000000..5ec21cace
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
@@ -0,0 +1,7 @@
+package fileutils
+
+// GetTotalUsedFds returns the number of used file descriptors. Not supported
+// on Windows.
+func GetTotalUsedFds() int {
+ return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir.go
new file mode 100644
index 000000000..8154e83f0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir.go
@@ -0,0 +1,39 @@
+package homedir
+
+import (
+ "os"
+ "runtime"
+
+ "github.com/opencontainers/runc/libcontainer/user"
+)
+
+// Key returns the env var name for the user's home dir based on
+// the platform being run on
+func Key() string {
+ if runtime.GOOS == "windows" {
+ return "USERPROFILE"
+ }
+ return "HOME"
+}
+
+// Get returns the home directory of the current user with the help of
+// environment variables depending on the target operating system.
+// Returned path should be used with "path/filepath" to form new paths.
+func Get() string {
+ home := os.Getenv(Key())
+ if home == "" && runtime.GOOS != "windows" {
+ if u, err := user.CurrentUser(); err == nil {
+ return u.Home
+ }
+ }
+ return home
+}
+
+// GetShortcutString returns the string that is the shortcut to the user's home
+// directory in the native shell of the platform being run on.
+func GetShortcutString() string {
+ if runtime.GOOS == "windows" {
+ return "%USERPROFILE%" // be careful while using in format functions
+ }
+ return "~"
+}
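// Usage sketch (illustrative only): resolving the current user's home directory
// in a platform-aware way with the helpers above.
package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/homedir"
)

func main() {
	home := homedir.Get() // falls back to a passwd lookup on non-Windows if the env var is unset
	fmt.Println("home env var: ", homedir.Key())
	fmt.Println("shortcut:     ", homedir.GetShortcutString())
	fmt.Println("example path: ", filepath.Join(home, ".docker", "config.json"))
}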
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
new file mode 100644
index 000000000..6bca46628
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
@@ -0,0 +1,197 @@
+package idtools
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// IDMap contains a single entry for user namespace range remapping. An array
+// of IDMap entries represents the structure that will be provided to the Linux
+// kernel for creating a user namespace.
+type IDMap struct {
+ ContainerID int `json:"container_id"`
+ HostID int `json:"host_id"`
+ Size int `json:"size"`
+}
+
+type subIDRange struct {
+ Start int
+ Length int
+}
+
+type ranges []subIDRange
+
+func (e ranges) Len() int { return len(e) }
+func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
+func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
+
+const (
+ subuidFileName string = "/etc/subuid"
+ subgidFileName string = "/etc/subgid"
+)
+
+// MkdirAllAs creates a directory (including any along the path) and then modifies
+// ownership to the requested uid/gid. If the directory already exists, this
+// function will still change ownership to the requested uid/gid pair.
+func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+ return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
+}
+
+// MkdirAllNewAs creates a directory (including any along the path) and then modifies
+// ownership ONLY of newly created directories to the requested uid/gid. If the
+// directories along the path already exist, no change of ownership will be performed.
+func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+ return mkdirAs(path, mode, ownerUID, ownerGID, true, false)
+}
+
+// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
+// If the directory already exists, this function still changes ownership
+func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+ return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
+}
+
+// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
+// If the maps are empty, then the root uid/gid will default to "real" 0/0
+func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
+ var uid, gid int
+
+ if uidMap != nil {
+ xUID, err := ToHost(0, uidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ uid = xUID
+ }
+ if gidMap != nil {
+ xGID, err := ToHost(0, gidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ gid = xGID
+ }
+ return uid, gid, nil
+}
+
+// ToContainer takes an id mapping, and uses it to translate a
+// host ID to the remapped ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed-in ID.
+func ToContainer(hostID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return hostID, nil
+ }
+ for _, m := range idMap {
+ if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
+ contID := m.ContainerID + (hostID - m.HostID)
+ return contID, nil
+ }
+ }
+ return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
+}
+
+// ToHost takes an id mapping and a remapped ID, and translates the
+// ID to the mapped host ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed-in ID.
+func ToHost(contID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return contID, nil
+ }
+ for _, m := range idMap {
+ if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
+ hostID := m.HostID + (contID - m.ContainerID)
+ return hostID, nil
+ }
+ }
+ return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
+}
+
+// CreateIDMappings takes a requested user and group name and
+// using the data from /etc/sub{uid,gid} ranges, creates the
+// proper uid and gid remapping ranges for that user/group pair
+func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) {
+ subuidRanges, err := parseSubuid(username)
+ if err != nil {
+ return nil, nil, err
+ }
+ subgidRanges, err := parseSubgid(groupname)
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(subuidRanges) == 0 {
+ return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username)
+ }
+ if len(subgidRanges) == 0 {
+ return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
+ }
+
+ return createIDMap(subuidRanges), createIDMap(subgidRanges), nil
+}
+
+func createIDMap(subidRanges ranges) []IDMap {
+ idMap := []IDMap{}
+
+ // sort the ranges by lowest ID first
+ sort.Sort(subidRanges)
+ containerID := 0
+ for _, idrange := range subidRanges {
+ idMap = append(idMap, IDMap{
+ ContainerID: containerID,
+ HostID: idrange.Start,
+ Size: idrange.Length,
+ })
+ containerID = containerID + idrange.Length
+ }
+ return idMap
+}
+
+func parseSubuid(username string) (ranges, error) {
+ return parseSubidFile(subuidFileName, username)
+}
+
+func parseSubgid(username string) (ranges, error) {
+ return parseSubidFile(subgidFileName, username)
+}
+
+// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
+// and return all found ranges for a specified username. If the special value
+// "ALL" is supplied for username, then all ranges in the file will be returned
+func parseSubidFile(path, username string) (ranges, error) {
+ var rangeList ranges
+
+ subidFile, err := os.Open(path)
+ if err != nil {
+ return rangeList, err
+ }
+ defer subidFile.Close()
+
+ s := bufio.NewScanner(subidFile)
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return rangeList, err
+ }
+
+ text := strings.TrimSpace(s.Text())
+ if text == "" || strings.HasPrefix(text, "#") {
+ continue
+ }
+ parts := strings.Split(text, ":")
+ if len(parts) != 3 {
+ return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
+ }
+ if parts[0] == username || username == "ALL" {
+ startid, err := strconv.Atoi(parts[1])
+ if err != nil {
+ return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ }
+ length, err := strconv.Atoi(parts[2])
+ if err != nil {
+ return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ }
+ rangeList = append(rangeList, subIDRange{startid, length})
+ }
+ }
+ return rangeList, nil
+}
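// Usage sketch (illustrative only; the mapping values are made up): translating
// IDs across a user-namespace remapping with ToHost and ToContainer.
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// Container IDs 0..65535 are mapped onto host IDs 100000..165535.
	uidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	hostUID, err := idtools.ToHost(0, uidMap) // container root -> host 100000
	if err != nil {
		log.Fatal(err)
	}
	containerUID, err := idtools.ToContainer(100123, uidMap) // host 100123 -> container 123
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hostUID, containerUID)
}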
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
new file mode 100644
index 000000000..b57d6ef12
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
@@ -0,0 +1,60 @@
+// +build !windows
+
+package idtools
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
+ // make an array containing the original path asked for, plus (for mkAll == true)
+ // all path components leading up to the complete path that don't exist before we MkdirAll
+ // so that we can chown all of them properly at the end. If chownExisting is false, we won't
+ // chown the full directory path if it exists
+ var paths []string
+ if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ paths = []string{path}
+ } else if err == nil && chownExisting {
+ if err := os.Chown(path, ownerUID, ownerGID); err != nil {
+ return err
+ }
+ // short-circuit--we were called with an existing directory and chown was requested
+ return nil
+ } else if err == nil {
+ // nothing to do; directory path fully exists already and chown was NOT requested
+ return nil
+ }
+
+ if mkAll {
+ // walk back to "/" looking for directories which do not exist
+ // and add them to the paths array for chown after creation
+ dirPath := path
+ for {
+ dirPath = filepath.Dir(dirPath)
+ if dirPath == "/" {
+ break
+ }
+ if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
+ paths = append(paths, dirPath)
+ }
+ }
+ if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
+ return err
+ }
+ } else {
+ if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
+ return err
+ }
+ }
+ // even if it existed, we will chown the requested path + any subpaths that
+ // didn't exist when we called MkdirAll
+ for _, pathComponent := range paths {
+ if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
new file mode 100644
index 000000000..c9e3c937c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package idtools
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+// Platforms such as Windows do not support the UID/GID concept, so this is
+// just a wrapper around system.MkdirAll.
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
+ if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
new file mode 100644
index 000000000..4a4aaed04
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
@@ -0,0 +1,188 @@
+package idtools
+
+import (
+ "fmt"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// add a user and/or group to Linux /etc/passwd, /etc/group using standard
+// Linux distribution commands:
+// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group
+// useradd -r -s /bin/false
+
+var (
+ once sync.Once
+ userCommand string
+
+ cmdTemplates = map[string]string{
+ "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
+ "useradd": "-r -s /bin/false %s",
+ "usermod": "-%s %d-%d %s",
+ }
+
+ idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
+ // default length for a UID/GID subordinate range
+ defaultRangeLen = 65536
+ defaultRangeStart = 100000
+ userMod = "usermod"
+)
+
+func resolveBinary(binname string) (string, error) {
+ binaryPath, err := exec.LookPath(binname)
+ if err != nil {
+ return "", err
+ }
+ resolvedPath, err := filepath.EvalSymlinks(binaryPath)
+ if err != nil {
+ return "", err
+ }
+ //only return no error if the final resolved binary basename
+ //matches what was searched for
+ if filepath.Base(resolvedPath) == binname {
+ return resolvedPath, nil
+ }
+ return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
+}
+
+// AddNamespaceRangesUser takes a username and uses the standard system
+// utility to create a system user/group pair used to hold the
+// /etc/sub{uid,gid} ranges which will be used for user namespace
+// mapping ranges in containers.
+func AddNamespaceRangesUser(name string) (int, int, error) {
+ if err := addUser(name); err != nil {
+ return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
+ }
+
+ // Query the system for the created uid and gid pair
+ out, err := execCmd("id", name)
+ if err != nil {
+ return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
+ }
+ matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
+ if len(matches) != 3 {
+ return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
+ }
+ uid, err := strconv.Atoi(matches[1])
+ if err != nil {
+ return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
+ }
+ gid, err := strconv.Atoi(matches[2])
+ if err != nil {
+ return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
+ }
+
+ // Now we need to create the subuid/subgid ranges for our new user/group (system users
+ // do not get auto-created ranges in subuid/subgid)
+
+ if err := createSubordinateRanges(name); err != nil {
+ return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
+ }
+ return uid, gid, nil
+}
+
+func addUser(userName string) error {
+ once.Do(func() {
+ // set up which commands are used for adding users/groups dependent on distro
+ if _, err := resolveBinary("adduser"); err == nil {
+ userCommand = "adduser"
+ } else if _, err := resolveBinary("useradd"); err == nil {
+ userCommand = "useradd"
+ }
+ })
+ if userCommand == "" {
+ return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
+ }
+ args := fmt.Sprintf(cmdTemplates[userCommand], userName)
+ out, err := execCmd(userCommand, args)
+ if err != nil {
+ return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
+ }
+ return nil
+}
+
+func createSubordinateRanges(name string) error {
+
+ // first, we should verify that ranges weren't automatically created
+ // by the distro tooling
+ ranges, err := parseSubuid(name)
+ if err != nil {
+ return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
+ }
+ if len(ranges) == 0 {
+ // no UID ranges; let's create one
+ startID, err := findNextUIDRange()
+ if err != nil {
+ return fmt.Errorf("Can't find available subuid range: %v", err)
+ }
+ out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
+ if err != nil {
+ return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
+ }
+ }
+
+ ranges, err = parseSubgid(name)
+ if err != nil {
+ return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
+ }
+ if len(ranges) == 0 {
+ // no GID ranges; let's create one
+ startID, err := findNextGIDRange()
+ if err != nil {
+ return fmt.Errorf("Can't find available subgid range: %v", err)
+ }
+ out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
+ if err != nil {
+ return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
+ }
+ }
+ return nil
+}
+
+func findNextUIDRange() (int, error) {
+ ranges, err := parseSubuid("ALL")
+ if err != nil {
+ return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
+ }
+ sort.Sort(ranges)
+ return findNextRangeStart(ranges)
+}
+
+func findNextGIDRange() (int, error) {
+ ranges, err := parseSubgid("ALL")
+ if err != nil {
+ return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
+ }
+ sort.Sort(ranges)
+ return findNextRangeStart(ranges)
+}
+
+func findNextRangeStart(rangeList ranges) (int, error) {
+ startID := defaultRangeStart
+ for _, arange := range rangeList {
+ if wouldOverlap(arange, startID) {
+ startID = arange.Start + arange.Length
+ }
+ }
+ return startID, nil
+}
+
+func wouldOverlap(arange subIDRange, ID int) bool {
+ low := ID
+ high := ID + defaultRangeLen
+ if (low >= arange.Start && low <= arange.Start+arange.Length) ||
+ (high <= arange.Start+arange.Length && high >= arange.Start) {
+ return true
+ }
+ return false
+}
+
+func execCmd(cmd, args string) ([]byte, error) {
+ execCmd := exec.Command(cmd, strings.Split(args, " ")...)
+ return execCmd.CombinedOutput()
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
new file mode 100644
index 000000000..d98b354cb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux
+
+package idtools
+
+import "fmt"
+
+// AddNamespaceRangesUser takes a name and would find an unused uid, gid pair
+// to hold subordinate ID ranges for that user. Adding users or groups is not
+// supported on this platform, so an error is always returned.
+func AddNamespaceRangesUser(name string) (int, int, error) {
+ return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
new file mode 100644
index 000000000..3d737b3e1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
@@ -0,0 +1,51 @@
+package ioutils
+
+import (
+ "errors"
+ "io"
+)
+
+var errBufferFull = errors.New("buffer is full")
+
+type fixedBuffer struct {
+ buf []byte
+ pos int
+ lastRead int
+}
+
+func (b *fixedBuffer) Write(p []byte) (int, error) {
+ n := copy(b.buf[b.pos:cap(b.buf)], p)
+ b.pos += n
+
+ if n < len(p) {
+ if b.pos == cap(b.buf) {
+ return n, errBufferFull
+ }
+ return n, io.ErrShortWrite
+ }
+ return n, nil
+}
+
+func (b *fixedBuffer) Read(p []byte) (int, error) {
+ n := copy(p, b.buf[b.lastRead:b.pos])
+ b.lastRead += n
+ return n, nil
+}
+
+func (b *fixedBuffer) Len() int {
+ return b.pos - b.lastRead
+}
+
+func (b *fixedBuffer) Cap() int {
+ return cap(b.buf)
+}
+
+func (b *fixedBuffer) Reset() {
+ b.pos = 0
+ b.lastRead = 0
+ b.buf = b.buf[:0]
+}
+
+func (b *fixedBuffer) String() string {
+ return string(b.buf[b.lastRead:b.pos])
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
new file mode 100644
index 000000000..eca129be3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
@@ -0,0 +1,185 @@
+package ioutils
+
+import (
+ "errors"
+ "io"
+ "sync"
+)
+
+// maxCap is the highest capacity to use in byte slices that buffer data.
+const maxCap = 1e6
+
+// minCap is the lowest capacity to use in byte slices that buffer data
+const minCap = 64
+
+// blockThreshold is the minimum number of bytes in the buffer which will cause
+// a write to BytesPipe to block when allocating a new slice.
+const blockThreshold = 1e6
+
+var (
+ // ErrClosed is returned when Write is called on a closed BytesPipe.
+ ErrClosed = errors.New("write to closed BytesPipe")
+
+ bufPools = make(map[int]*sync.Pool)
+ bufPoolsLock sync.Mutex
+)
+
+// BytesPipe is an io.ReadWriteCloser which works similarly to a pipe (queue).
+// All written data may be read at most once. Also, BytesPipe allocates
+// and releases new byte slices to adjust to current needs, so the buffer
+// won't stay overgrown after peak loads.
+type BytesPipe struct {
+ mu sync.Mutex
+ wait *sync.Cond
+ buf []*fixedBuffer
+ bufLen int
+ closeErr error // error to return from next Read. set to nil if not closed.
+}
+
+// NewBytesPipe creates a new BytesPipe, backed by a single fixed buffer
+// with an initial capacity of minCap (64) bytes. Further buffers are
+// allocated on demand as data is written.
+func NewBytesPipe() *BytesPipe {
+ bp := &BytesPipe{}
+ bp.buf = append(bp.buf, getBuffer(minCap))
+ bp.wait = sync.NewCond(&bp.mu)
+ return bp
+}
+
+// Write writes p to BytesPipe.
+// It can allocate new []byte slices in a process of writing.
+func (bp *BytesPipe) Write(p []byte) (int, error) {
+ bp.mu.Lock()
+
+ written := 0
+loop0:
+ for {
+ if bp.closeErr != nil {
+ bp.mu.Unlock()
+ return written, ErrClosed
+ }
+
+ if len(bp.buf) == 0 {
+ bp.buf = append(bp.buf, getBuffer(64))
+ }
+ // get the last buffer
+ b := bp.buf[len(bp.buf)-1]
+
+ n, err := b.Write(p)
+ written += n
+ bp.bufLen += n
+
+ // errBufferFull is an error we expect to get if the buffer is full
+ if err != nil && err != errBufferFull {
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return written, err
+ }
+
+ // if there was enough room to write all then break
+ if len(p) == n {
+ break
+ }
+
+ // more data: write to the next slice
+ p = p[n:]
+
+ // make sure the buffer doesn't grow too big from this write
+ for bp.bufLen >= blockThreshold {
+ bp.wait.Wait()
+ if bp.closeErr != nil {
+ continue loop0
+ }
+ }
+
+ // add new byte slice to the buffers slice and continue writing
+ nextCap := b.Cap() * 2
+ if nextCap > maxCap {
+ nextCap = maxCap
+ }
+ bp.buf = append(bp.buf, getBuffer(nextCap))
+ }
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return written, nil
+}
+
+// CloseWithError causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) CloseWithError(err error) error {
+ bp.mu.Lock()
+ if err != nil {
+ bp.closeErr = err
+ } else {
+ bp.closeErr = io.EOF
+ }
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return nil
+}
+
+// Close causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) Close() error {
+ return bp.CloseWithError(nil)
+}
+
+// Read reads bytes from BytesPipe.
+// Data could be read only once.
+func (bp *BytesPipe) Read(p []byte) (n int, err error) {
+ bp.mu.Lock()
+ if bp.bufLen == 0 {
+ if bp.closeErr != nil {
+ bp.mu.Unlock()
+ return 0, bp.closeErr
+ }
+ bp.wait.Wait()
+ if bp.bufLen == 0 && bp.closeErr != nil {
+ bp.mu.Unlock()
+ return 0, bp.closeErr
+ }
+ }
+
+ for bp.bufLen > 0 {
+ b := bp.buf[0]
+ read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
+ n += read
+ bp.bufLen -= read
+
+ if b.Len() == 0 {
+ // it's empty so return it to the pool and move to the next one
+ returnBuffer(b)
+ bp.buf[0] = nil
+ bp.buf = bp.buf[1:]
+ }
+
+ if len(p) == read {
+ break
+ }
+
+ p = p[read:]
+ }
+
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return
+}
+
+func returnBuffer(b *fixedBuffer) {
+ b.Reset()
+ bufPoolsLock.Lock()
+ pool := bufPools[b.Cap()]
+ bufPoolsLock.Unlock()
+ if pool != nil {
+ pool.Put(b)
+ }
+}
+
+func getBuffer(size int) *fixedBuffer {
+ bufPoolsLock.Lock()
+ pool, ok := bufPools[size]
+ if !ok {
+ pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
+ bufPools[size] = pool
+ }
+ bufPoolsLock.Unlock()
+ return pool.Get().(*fixedBuffer)
+}
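// Usage sketch (illustrative only): BytesPipe behaves like an in-memory pipe
// whose data can be read exactly once; it grows and releases fixedBuffers as
// the producer and consumer run at different speeds.
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()

	go func() {
		bp.Write([]byte("streamed "))
		bp.Write([]byte("output"))
		bp.Close() // readers drain the remaining data and then get io.EOF
	}()

	data, _ := ioutil.ReadAll(bp)
	fmt.Println(string(data)) // "streamed output"
}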
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fmt.go b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go
new file mode 100644
index 000000000..0b04b0ba3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go
@@ -0,0 +1,22 @@
+package ioutils
+
+import (
+ "fmt"
+ "io"
+)
+
+// FprintfIfNotEmpty prints the string value if it's not empty
+func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
+ if value != "" {
+ return fmt.Fprintf(w, format, value)
+ }
+ return 0, nil
+}
+
+// FprintfIfTrue prints the boolean value if it's true
+func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) {
+ if ok {
+ return fmt.Fprintf(w, format, ok)
+ }
+ return 0, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
new file mode 100644
index 000000000..ca9767072
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
@@ -0,0 +1,75 @@
+package ioutils
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// NewAtomicFileWriter returns a WriteCloser so that writing to it writes to a
+// temporary file and closing it atomically renames the temporary file to the
+// destination path. Writing and closing concurrently is not allowed.
+func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
+ f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
+ if err != nil {
+ return nil, err
+ }
+ abspath, err := filepath.Abs(filename)
+ if err != nil {
+ return nil, err
+ }
+ return &atomicFileWriter{
+ f: f,
+ fn: abspath,
+ }, nil
+}
+
+// AtomicWriteFile atomically writes data to a file named by filename.
+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
+ f, err := NewAtomicFileWriter(filename, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+type atomicFileWriter struct {
+ f *os.File
+ fn string
+ writeErr error
+}
+
+func (w *atomicFileWriter) Write(dt []byte) (int, error) {
+ n, err := w.f.Write(dt)
+ if err != nil {
+ w.writeErr = err
+ }
+ return n, err
+}
+
+func (w *atomicFileWriter) Close() (retErr error) {
+ defer func() {
+ if retErr != nil {
+ os.Remove(w.f.Name())
+ }
+ }()
+ if err := w.f.Sync(); err != nil {
+ w.f.Close()
+ return err
+ }
+ if err := w.f.Close(); err != nil {
+ return err
+ }
+ if w.writeErr == nil {
+ return os.Rename(w.f.Name(), w.fn)
+ }
+ return nil
+}
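// Usage sketch (illustrative only; the path is an example): AtomicWriteFile
// stages the data in a temporary file in the same directory and renames it
// into place on Close, so readers never observe a half-written file.
package main

import (
	"log"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	state := []byte(`{"last_run":"ok"}`)
	if err := ioutils.AtomicWriteFile("/tmp/provider-state.json", state, 0644); err != nil {
		log.Fatal(err)
	}
}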
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/multireader.go b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go
new file mode 100644
index 000000000..0d2d76b47
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go
@@ -0,0 +1,226 @@
+package ioutils
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+type pos struct {
+ idx int
+ offset int64
+}
+
+type multiReadSeeker struct {
+ readers []io.ReadSeeker
+ pos *pos
+ posIdx map[io.ReadSeeker]int
+}
+
+func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ var tmpOffset int64
+ switch whence {
+ case os.SEEK_SET:
+ for i, rdr := range r.readers {
+ // get size of the current reader
+ s, err := rdr.Seek(0, os.SEEK_END)
+ if err != nil {
+ return -1, err
+ }
+
+ if offset > tmpOffset+s {
+ if i == len(r.readers)-1 {
+ rdrOffset := s + (offset - tmpOffset)
+ if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil {
+ return -1, err
+ }
+ r.pos = &pos{i, rdrOffset}
+ return offset, nil
+ }
+
+ tmpOffset += s
+ continue
+ }
+
+ rdrOffset := offset - tmpOffset
+ idx := i
+
+ rdr.Seek(rdrOffset, os.SEEK_SET)
+ // make sure all following readers are at 0
+ for _, rdr := range r.readers[i+1:] {
+ rdr.Seek(0, os.SEEK_SET)
+ }
+
+ if rdrOffset == s && i != len(r.readers)-1 {
+ idx++
+ rdrOffset = 0
+ }
+ r.pos = &pos{idx, rdrOffset}
+ return offset, nil
+ }
+ case os.SEEK_END:
+ for _, rdr := range r.readers {
+ s, err := rdr.Seek(0, os.SEEK_END)
+ if err != nil {
+ return -1, err
+ }
+ tmpOffset += s
+ }
+ r.Seek(tmpOffset+offset, os.SEEK_SET)
+ return tmpOffset + offset, nil
+ case os.SEEK_CUR:
+ if r.pos == nil {
+ return r.Seek(offset, os.SEEK_SET)
+ }
+ // Just return the current offset
+ if offset == 0 {
+ return r.getCurOffset()
+ }
+
+ curOffset, err := r.getCurOffset()
+ if err != nil {
+ return -1, err
+ }
+ rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset)
+ if err != nil {
+ return -1, err
+ }
+
+ r.pos = &pos{r.posIdx[rdr], rdrOffset}
+ return curOffset + offset, nil
+ default:
+ return -1, fmt.Errorf("Invalid whence: %d", whence)
+ }
+
+ return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset)
+}
+
+func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) {
+ var rdr io.ReadSeeker
+ var rdrOffset int64
+
+ for i, rdr := range r.readers {
+ offsetTo, err := r.getOffsetToReader(rdr)
+ if err != nil {
+ return nil, -1, err
+ }
+ if offsetTo > offset {
+ rdr = r.readers[i-1]
+ rdrOffset = offsetTo - offset
+ break
+ }
+
+ if rdr == r.readers[len(r.readers)-1] {
+ rdrOffset = offsetTo + offset
+ break
+ }
+ }
+
+ return rdr, rdrOffset, nil
+}
+
+func (r *multiReadSeeker) getCurOffset() (int64, error) {
+ var totalSize int64
+ for _, rdr := range r.readers[:r.pos.idx+1] {
+ if r.posIdx[rdr] == r.pos.idx {
+ totalSize += r.pos.offset
+ break
+ }
+
+ size, err := getReadSeekerSize(rdr)
+ if err != nil {
+ return -1, fmt.Errorf("error getting seeker size: %v", err)
+ }
+ totalSize += size
+ }
+ return totalSize, nil
+}
+
+func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
+ var offset int64
+ for _, r := range r.readers {
+ if r == rdr {
+ break
+ }
+
+ size, err := getReadSeekerSize(rdr)
+ if err != nil {
+ return -1, err
+ }
+ offset += size
+ }
+ return offset, nil
+}
+
+func (r *multiReadSeeker) Read(b []byte) (int, error) {
+ if r.pos == nil {
+ r.pos = &pos{0, 0}
+ }
+
+ bCap := int64(cap(b))
+ buf := bytes.NewBuffer(nil)
+ var rdr io.ReadSeeker
+
+ for _, rdr = range r.readers[r.pos.idx:] {
+ readBytes, err := io.CopyN(buf, rdr, bCap)
+ if err != nil && err != io.EOF {
+ return -1, err
+ }
+ bCap -= readBytes
+
+ if bCap == 0 {
+ break
+ }
+ }
+
+ rdrPos, err := rdr.Seek(0, os.SEEK_CUR)
+ if err != nil {
+ return -1, err
+ }
+ r.pos = &pos{r.posIdx[rdr], rdrPos}
+ return buf.Read(b)
+}
+
+func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) {
+ // save the current position
+ pos, err := rdr.Seek(0, os.SEEK_CUR)
+ if err != nil {
+ return -1, err
+ }
+
+ // get the size
+ size, err := rdr.Seek(0, os.SEEK_END)
+ if err != nil {
+ return -1, err
+ }
+
+ // reset the position
+ if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil {
+ return -1, err
+ }
+ return size, nil
+}
+
+// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided
+// input readseekers. After calling this method the initial position is set to the
+// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances
+// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker.
+// Seek can be used over the sum of lengths of all readseekers.
+//
+// When a MultiReadSeeker is used, no Read or Seek operations should be made on
+// its ReadSeeker components. Also, users should make no assumptions about the
+// state of individual readseekers while the MultiReadSeeker is used.
+func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
+ if len(readers) == 1 {
+ return readers[0]
+ }
+ idx := make(map[io.ReadSeeker]int)
+ for i, rdr := range readers {
+ idx[rdr] = i
+ }
+ return &multiReadSeeker{
+ readers: readers,
+ posIdx: idx,
+ }
+}
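// Usage sketch (illustrative only): MultiReadSeeker concatenates several
// ReadSeekers and lets callers Seek across the combined stream as if it were
// a single file.
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"strings"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	rs := ioutils.MultiReadSeeker(
		strings.NewReader("first part, "),
		strings.NewReader("second part"),
	)

	// Skip the first five bytes of the logical stream, then read the rest.
	if _, err := rs.Seek(5, os.SEEK_SET); err != nil {
		fmt.Println("seek error:", err)
		return
	}
	rest, _ := ioutil.ReadAll(rs)
	fmt.Println(string(rest)) // "part, second part"
}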
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go
new file mode 100644
index 000000000..e73b02bbf
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go
@@ -0,0 +1,154 @@
+package ioutils
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "io"
+
+ "golang.org/x/net/context"
+)
+
+type readCloserWrapper struct {
+ io.Reader
+ closer func() error
+}
+
+func (r *readCloserWrapper) Close() error {
+ return r.closer()
+}
+
+// NewReadCloserWrapper returns a new io.ReadCloser.
+func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
+ return &readCloserWrapper{
+ Reader: r,
+ closer: closer,
+ }
+}
+
+type readerErrWrapper struct {
+ reader io.Reader
+ closer func()
+}
+
+func (r *readerErrWrapper) Read(p []byte) (int, error) {
+ n, err := r.reader.Read(p)
+ if err != nil {
+ r.closer()
+ }
+ return n, err
+}
+
+// NewReaderErrWrapper returns a new io.Reader.
+func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
+ return &readerErrWrapper{
+ reader: r,
+ closer: closer,
+ }
+}
+
+// HashData returns the sha256 sum of src.
+func HashData(src io.Reader) (string, error) {
+ h := sha256.New()
+ if _, err := io.Copy(h, src); err != nil {
+ return "", err
+ }
+ return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
+
+// OnEOFReader wraps an io.ReadCloser and a function; the function
+// is run when the reader reaches end of file or is closed.
+type OnEOFReader struct {
+ Rc io.ReadCloser
+ Fn func()
+}
+
+func (r *OnEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.Rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+// Close closes the file and runs the function.
+func (r *OnEOFReader) Close() error {
+ err := r.Rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *OnEOFReader) runFunc() {
+ if fn := r.Fn; fn != nil {
+ fn()
+ r.Fn = nil
+ }
+}
+
+// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
+// operations.
+type cancelReadCloser struct {
+ cancel func()
+ pR *io.PipeReader // Stream to read from
+ pW *io.PipeWriter
+}
+
+// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
+// context is cancelled. The returned io.ReadCloser must be closed when it is
+// no longer needed.
+func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
+ pR, pW := io.Pipe()
+
+ // Create a context used to signal when the pipe is closed
+ doneCtx, cancel := context.WithCancel(context.Background())
+
+ p := &cancelReadCloser{
+ cancel: cancel,
+ pR: pR,
+ pW: pW,
+ }
+
+ go func() {
+ _, err := io.Copy(pW, in)
+ select {
+ case <-ctx.Done():
+ // If the context was closed, p.closeWithError
+ // was already called. Calling it again would
+ // change the error that Read returns.
+ default:
+ p.closeWithError(err)
+ }
+ in.Close()
+ }()
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ p.closeWithError(ctx.Err())
+ case <-doneCtx.Done():
+ return
+ }
+ }
+ }()
+
+ return p
+}
+
+// Read wraps the Read method of the pipe that provides data from the wrapped
+// ReadCloser.
+func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
+ return p.pR.Read(buf)
+}
+
+// closeWithError closes the wrapper and its underlying reader. It will
+// cause future calls to Read to return err.
+func (p *cancelReadCloser) closeWithError(err error) {
+ p.pW.CloseWithError(err)
+ p.cancel()
+}
+
+// Close closes the wrapper and its underlying reader. It will cause
+// future calls to Read to return io.EOF.
+func (p *cancelReadCloser) Close() error {
+ p.closeWithError(io.EOF)
+ return nil
+}
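// Usage sketch (illustrative only): HashData consumes a reader and returns its
// digest in the "sha256:<hex>" form used throughout the Docker codebase.
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	digest, err := ioutils.HashData(strings.NewReader("layer contents"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(digest)
}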
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
new file mode 100644
index 000000000..1539ad21b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package ioutils
+
+import "io/ioutil"
+
+// TempDir on Unix systems is equivalent to ioutil.TempDir.
+func TempDir(dir, prefix string) (string, error) {
+ return ioutil.TempDir(dir, prefix)
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
new file mode 100644
index 000000000..c258e5fdd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package ioutils
+
+import (
+ "io/ioutil"
+
+ "github.com/docker/docker/pkg/longpath"
+)
+
+// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+func TempDir(dir, prefix string) (string, error) {
+ tempDir, err := ioutil.TempDir(dir, prefix)
+ if err != nil {
+ return "", err
+ }
+ return longpath.AddPrefix(tempDir), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
new file mode 100644
index 000000000..52a4901ad
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
@@ -0,0 +1,92 @@
+package ioutils
+
+import (
+ "io"
+ "sync"
+)
+
+// WriteFlusher wraps the Write and Flush operations, ensuring that every write
+// is a flush. In addition, the Close method can be called to intercept
+// Read/Write calls if the target's lifecycle has already ended.
+type WriteFlusher struct {
+ w io.Writer
+ flusher flusher
+ flushed chan struct{}
+ flushedOnce sync.Once
+ closed chan struct{}
+ closeLock sync.Mutex
+}
+
+type flusher interface {
+ Flush()
+}
+
+var errWriteFlusherClosed = io.EOF
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+ select {
+ case <-wf.closed:
+ return 0, errWriteFlusherClosed
+ default:
+ }
+
+ n, err = wf.w.Write(b)
+ wf.Flush() // every write is a flush.
+ return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+ select {
+ case <-wf.closed:
+ return
+ default:
+ }
+
+ wf.flushedOnce.Do(func() {
+ close(wf.flushed)
+ })
+ wf.flusher.Flush()
+}
+
+// Flushed reports whether Flush has been called at least once;
+// it returns true if so and false otherwise.
+func (wf *WriteFlusher) Flushed() bool {
+ // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
+ // be used to detect whether or not a response code has been issued. Another
+ // hook should be used instead.
+ var flushed bool
+ select {
+ case <-wf.flushed:
+ flushed = true
+ default:
+ }
+ return flushed
+}
+
+// Close closes the write flusher, disallowing any further writes to the
+// target. After the flusher is closed, all calls to write or flush will
+// result in an error.
+func (wf *WriteFlusher) Close() error {
+ wf.closeLock.Lock()
+ defer wf.closeLock.Unlock()
+
+ select {
+ case <-wf.closed:
+ return errWriteFlusherClosed
+ default:
+ close(wf.closed)
+ }
+ return nil
+}
+
+// NewWriteFlusher returns a new WriteFlusher.
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+ var fl flusher
+ if f, ok := w.(flusher); ok {
+ fl = f
+ } else {
+ fl = &NopFlusher{}
+ }
+ return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
new file mode 100644
index 000000000..ccc7f9c23
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
@@ -0,0 +1,66 @@
+package ioutils
+
+import "io"
+
+// NopWriter represents a type whose Write operation is a no-op.
+type NopWriter struct{}
+
+func (*NopWriter) Write(buf []byte) (int, error) {
+ return len(buf), nil
+}
+
+type nopWriteCloser struct {
+ io.Writer
+}
+
+func (w *nopWriteCloser) Close() error { return nil }
+
+// NopWriteCloser returns a nopWriteCloser.
+func NopWriteCloser(w io.Writer) io.WriteCloser {
+ return &nopWriteCloser{w}
+}
+
+// NopFlusher represents a type whose Flush operation is a no-op.
+type NopFlusher struct{}
+
+// Flush is a no-op operation.
+func (f *NopFlusher) Flush() {}
+
+type writeCloserWrapper struct {
+ io.Writer
+ closer func() error
+}
+
+func (r *writeCloserWrapper) Close() error {
+ return r.closer()
+}
+
+// NewWriteCloserWrapper returns a new io.WriteCloser.
+func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+ return &writeCloserWrapper{
+ Writer: r,
+ closer: closer,
+ }
+}
+
+// WriteCounter wraps a concrete io.Writer and holds a count of the number
+// of bytes written to the writer during a "session".
+// This can be convenient when the write return value is masked
+// (e.g., json.Encoder.Encode())
+type WriteCounter struct {
+ Count int64
+ Writer io.Writer
+}
+
+// NewWriteCounter returns a new WriteCounter.
+func NewWriteCounter(w io.Writer) *WriteCounter {
+ return &WriteCounter{
+ Writer: w,
+ }
+}
+
+func (wc *WriteCounter) Write(p []byte) (count int, err error) {
+ count, err = wc.Writer.Write(p)
+ wc.Count += int64(count)
+ return
+}
diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go
new file mode 100644
index 000000000..9b15bfff4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/longpath/longpath.go
@@ -0,0 +1,26 @@
+// Package longpath introduces some constants and helper functions for handling long paths
+// in Windows, which are expected to be prepended with `\\?\` and followed by either
+// a drive letter, a UNC server\share, or a volume identifier.
+
+package longpath
+
+import (
+ "strings"
+)
+
+// Prefix is the longpath prefix for Windows file paths.
+const Prefix = `\\?\`
+
+// AddPrefix will add the Windows long path prefix to the path provided if
+// it does not already have it.
+func AddPrefix(path string) string {
+ if !strings.HasPrefix(path, Prefix) {
+ if strings.HasPrefix(path, `\\`) {
+ // This is a UNC path, so we need to add 'UNC' to the path as well.
+ path = Prefix + `UNC` + path[1:]
+ } else {
+ path = Prefix + path
+ }
+ }
+ return path
+}
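// Usage sketch (illustrative only; the paths are examples): AddPrefix converts
// a Windows path, including UNC paths, to the long-path form.
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/longpath"
)

func main() {
	fmt.Println(longpath.AddPrefix(`C:\ProgramData\docker`)) // \\?\C:\ProgramData\docker
	fmt.Println(longpath.AddPrefix(`\\server\share\image`))  // \\?\UNC\server\share\image
}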
diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go
new file mode 100644
index 000000000..76e84f9d7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/pools/pools.go
@@ -0,0 +1,119 @@
+// Package pools provides a collection of pools which provide various
+// data types with buffers. These can be used to lower the number of
+// memory allocations and reuse buffers.
+//
+// New pools should be added to this package to allow them to be
+// shared across packages.
+//
+// Utility functions which operate on pools should be added to this
+// package to allow them to be reused.
+package pools
+
+import (
+ "bufio"
+ "io"
+ "sync"
+
+ "github.com/docker/docker/pkg/ioutils"
+)
+
+var (
+ // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
+ BufioReader32KPool *BufioReaderPool
+ // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
+ BufioWriter32KPool *BufioWriterPool
+)
+
+const buffer32K = 32 * 1024
+
+// BufioReaderPool is a bufio reader that uses sync.Pool.
+type BufioReaderPool struct {
+ pool sync.Pool
+}
+
+func init() {
+ BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
+ BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
+}
+
+// newBufioReaderPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
+ pool := sync.Pool{
+ New: func() interface{} { return bufio.NewReaderSize(nil, size) },
+ }
+ return &BufioReaderPool{pool: pool}
+}
+
+// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+ buf := bufPool.pool.Get().(*bufio.Reader)
+ buf.Reset(r)
+ return buf
+}
+
+// Put puts the bufio.Reader back into the pool.
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+ buf := BufioReader32KPool.Get(src)
+ written, err = io.Copy(dst, buf)
+ BufioReader32KPool.Put(buf)
+ return
+}
+
+// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
+// into the pool and closes the reader if it's an io.ReadCloser.
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(r, func() error {
+ if readCloser, ok := r.(io.ReadCloser); ok {
+ readCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
+
+// BufioWriterPool is a bufio writer that uses sync.Pool.
+type BufioWriterPool struct {
+ pool sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+ pool := sync.Pool{
+ New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+ }
+ return &BufioWriterPool{pool: pool}
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+ buf := bufPool.pool.Get().(*bufio.Writer)
+ buf.Reset(w)
+ return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.Writecloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+ return ioutils.NewWriteCloserWrapper(w, func() error {
+ buf.Flush()
+ if writeCloser, ok := w.(io.WriteCloser); ok {
+ writeCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
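// Usage sketch (illustrative only): pools.Copy reuses a pooled 32K bufio.Reader
// for the copy instead of allocating a fresh buffer per stream.
package main

import (
	"log"
	"os"
	"strings"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	src := strings.NewReader("data that would normally come from a network or file reader\n")
	if _, err := pools.Copy(os.Stdout, src); err != nil {
		log.Fatal(err)
	}
}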
diff --git a/vendor/github.com/docker/docker/pkg/promise/promise.go b/vendor/github.com/docker/docker/pkg/promise/promise.go
new file mode 100644
index 000000000..dd52b9082
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/promise/promise.go
@@ -0,0 +1,11 @@
+package promise
+
+// Go is a basic promise implementation: it wraps a call to a function in a goroutine,
+// and returns a channel which will later return the function's return value.
+func Go(f func() error) chan error {
+ ch := make(chan error, 1)
+ go func() {
+ ch <- f()
+ }()
+ return ch
+}
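// Usage sketch (illustrative only): promise.Go runs a function in a goroutine
// and hands back a buffered error channel to wait on later.
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/promise"
)

func main() {
	errC := promise.Go(func() error {
		time.Sleep(10 * time.Millisecond) // stand-in for real work
		return nil
	})

	// ... do other work here ...

	if err := <-errC; err != nil {
		fmt.Println("background task failed:", err)
	}
}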
diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
new file mode 100644
index 000000000..8f67ece94
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
@@ -0,0 +1,185 @@
+package stdcopy
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// StdType is the type of standard stream
+// a writer can multiplex to.
+type StdType byte
+
+const (
+ // Stdin represents standard input stream type.
+ Stdin StdType = iota
+ // Stdout represents standard output stream type.
+ Stdout
+ // Stderr represents standard error stream type.
+ Stderr
+
+ stdWriterPrefixLen = 8
+ stdWriterFdIndex = 0
+ stdWriterSizeIndex = 4
+
+ startingBufLen = 32*1024 + stdWriterPrefixLen + 1
+)
+
+var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }}
+
+// stdWriter is a wrapper of io.Writer with extra customized info.
+type stdWriter struct {
+ io.Writer
+ prefix byte
+}
+
+// Write sends the buffer to the underlying writer.
+// It inserts the prefix header before the buffer,
+// so stdcopy.StdCopy knows where to multiplex the output.
+// It makes stdWriter implement io.Writer.
+func (w *stdWriter) Write(p []byte) (n int, err error) {
+ if w == nil || w.Writer == nil {
+ return 0, errors.New("Writer not instantiated")
+ }
+ if p == nil {
+ return 0, nil
+ }
+
+ header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
+ binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p)))
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Write(header[:])
+ buf.Write(p)
+
+ n, err = w.Writer.Write(buf.Bytes())
+ n -= stdWriterPrefixLen
+ if n < 0 {
+ n = 0
+ }
+
+ buf.Reset()
+ bufPool.Put(buf)
+ return
+}
+
+// NewStdWriter instantiates a new Writer.
+// Everything written to it will be encapsulated using a custom format,
+// and written to the underlying `w` stream.
+// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
+// `t` indicates the id of the stream to encapsulate.
+// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
+func NewStdWriter(w io.Writer, t StdType) io.Writer {
+ return &stdWriter{
+ Writer: w,
+ prefix: byte(t),
+ }
+}
+
+// StdCopy is a modified version of io.Copy.
+//
+// StdCopy will demultiplex `src`, assuming that it contains two streams,
+// previously multiplexed together using a StdWriter instance.
+// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
+//
+// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
+// In other words: if `err` is non-nil, it indicates a real underlying error.
+//
+// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
+func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
+ var (
+ buf = make([]byte, startingBufLen)
+ bufLen = len(buf)
+ nr, nw int
+ er, ew error
+ out io.Writer
+ frameSize int
+ )
+
+ for {
+ // Make sure we have at least a full header
+ for nr < stdWriterPrefixLen {
+ var nr2 int
+ nr2, er = src.Read(buf[nr:])
+ nr += nr2
+ if er == io.EOF {
+ if nr < stdWriterPrefixLen {
+ logrus.Debugf("Corrupted prefix: %v", buf[:nr])
+ return written, nil
+ }
+ break
+ }
+ if er != nil {
+ logrus.Debugf("Error reading header: %s", er)
+ return 0, er
+ }
+ }
+
+ // Check the first byte to know where to write
+ switch StdType(buf[stdWriterFdIndex]) {
+ case Stdin:
+ fallthrough
+ case Stdout:
+ // Write on stdout
+ out = dstout
+ case Stderr:
+ // Write on stderr
+ out = dsterr
+ default:
+ logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex])
+ return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
+ }
+
+ // Retrieve the size of the frame
+ frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
+ logrus.Debugf("framesize: %d", frameSize)
+
+ // Check if the buffer is big enough to read the frame.
+ // Extend it if necessary.
+ if frameSize+stdWriterPrefixLen > bufLen {
+ logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+stdWriterPrefixLen-bufLen+1, len(buf))
+ buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
+ bufLen = len(buf)
+ }
+
+ // While the amount of bytes read is less than the size of the frame + header, we keep reading
+ for nr < frameSize+stdWriterPrefixLen {
+ var nr2 int
+ nr2, er = src.Read(buf[nr:])
+ nr += nr2
+ if er == io.EOF {
+ if nr < frameSize+stdWriterPrefixLen {
+ logrus.Debugf("Corrupted frame: %v", buf[stdWriterPrefixLen:nr])
+ return written, nil
+ }
+ break
+ }
+ if er != nil {
+ logrus.Debugf("Error reading frame: %s", er)
+ return 0, er
+ }
+ }
+
+ // Write the retrieved frame (without header)
+ nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
+ if ew != nil {
+ logrus.Debugf("Error writing frame: %s", ew)
+ return 0, ew
+ }
+ // If the frame has not been fully written: error
+ if nw != frameSize {
+ logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize)
+ return 0, io.ErrShortWrite
+ }
+ written += int64(nw)
+
+ // Move the rest of the buffer to the beginning
+ copy(buf, buf[frameSize+stdWriterPrefixLen:])
+ // Move the index
+ nr -= frameSize + stdWriterPrefixLen
+ }
+}
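
For orientation only (not part of the vendored change), here is a minimal sketch of how the multiplexing and demultiplexing described above is typically exercised, assuming the package is imported by its canonical path github.com/docker/docker/pkg/stdcopy:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Multiplex two logical streams into a single buffer.
	var muxed bytes.Buffer
	outW := stdcopy.NewStdWriter(&muxed, stdcopy.Stdout)
	errW := stdcopy.NewStdWriter(&muxed, stdcopy.Stderr)
	outW.Write([]byte("to stdout\n"))
	errW.Write([]byte("to stderr\n"))

	// Demultiplex the combined stream back into separate destinations.
	var stdout, stderr bytes.Buffer
	if _, err := stdcopy.StdCopy(&stdout, &stderr, &muxed); err != nil {
		panic(err)
	}
	fmt.Print(stdout.String()) // to stdout
	fmt.Print(stderr.String()) // to stderr
}
```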
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go
new file mode 100644
index 000000000..7637f12e1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go
@@ -0,0 +1,52 @@
+package system
+
+import (
+ "os"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+var (
+ maxTime time.Time
+)
+
+func init() {
+ if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
+ // This is a 64 bit timespec
+ // os.Chtimes limits time to the following
+ maxTime = time.Unix(0, 1<<63-1)
+ } else {
+ // This is a 32 bit timespec
+ maxTime = time.Unix(1<<31-1, 0)
+ }
+}
+
+// Chtimes changes the access time and modified time of a file at the given path
+func Chtimes(name string, atime time.Time, mtime time.Time) error {
+ unixMinTime := time.Unix(0, 0)
+ unixMaxTime := maxTime
+
+ // If the modified time is prior to the Unix Epoch, or after the
+ // end of Unix Time, os.Chtimes has undefined behavior
+ // default to Unix Epoch in this case, just in case
+
+ if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
+ atime = unixMinTime
+ }
+
+ if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
+ mtime = unixMinTime
+ }
+
+ if err := os.Chtimes(name, atime, mtime); err != nil {
+ return err
+ }
+
+ // Take platform specific action for setting create time.
+ if err := setCTime(name, mtime); err != nil {
+ return err
+ }
+
+ return nil
+}
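
As a hedged illustration of the clamping behaviour documented above (again, not part of the vendored code), a sketch that sets a modification time before the Unix epoch and observes it being clamped to the epoch:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"time"

	"github.com/docker/docker/pkg/system"
)

func main() {
	f, err := ioutil.TempFile("", "chtimes-demo")
	if err != nil {
		panic(err)
	}
	f.Close()
	defer os.Remove(f.Name())

	// A modification time before the Unix epoch is clamped to the epoch.
	preEpoch := time.Unix(0, 0).Add(-time.Hour)
	if err := system.Chtimes(f.Name(), time.Now(), preEpoch); err != nil {
		panic(err)
	}
	fi, err := os.Stat(f.Name())
	if err != nil {
		panic(err)
	}
	fmt.Println(fi.ModTime().UTC()) // 1970-01-01 00:00:00 +0000 UTC
}
```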
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go
new file mode 100644
index 000000000..09d58bcbf
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go
@@ -0,0 +1,14 @@
+// +build !windows
+
+package system
+
+import (
+ "time"
+)
+
+//setCTime will set the create time on a file. On Unix, the create
+//time is updated as a side effect of setting the modified time, so
+//no action is required.
+func setCTime(path string, ctime time.Time) error {
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
new file mode 100644
index 000000000..294586846
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
@@ -0,0 +1,27 @@
+// +build windows
+
+package system
+
+import (
+ "syscall"
+ "time"
+)
+
+//setCTime will set the create time on a file. On Windows, this requires
+//calling SetFileTime and explicitly including the create time.
+func setCTime(path string, ctime time.Time) error {
+ ctimespec := syscall.NsecToTimespec(ctime.UnixNano())
+ pathp, e := syscall.UTF16PtrFromString(path)
+ if e != nil {
+ return e
+ }
+ h, e := syscall.CreateFile(pathp,
+ syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
+ syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
+ if e != nil {
+ return e
+ }
+ defer syscall.Close(h)
+ c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec))
+ return syscall.SetFileTime(h, &c, nil, nil)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go
new file mode 100644
index 000000000..288318985
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/errors.go
@@ -0,0 +1,10 @@
+package system
+
+import (
+ "errors"
+)
+
+var (
+ // ErrNotSupportedPlatform means the platform is not supported.
+ ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
+)
diff --git a/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go
new file mode 100644
index 000000000..04e2de787
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/events_windows.go
@@ -0,0 +1,83 @@
+package system
+
+// This file implements syscalls for Win32 events which are not implemented
+// in golang.
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ procCreateEvent = modkernel32.NewProc("CreateEventW")
+ procOpenEvent = modkernel32.NewProc("OpenEventW")
+ procSetEvent = modkernel32.NewProc("SetEvent")
+ procResetEvent = modkernel32.NewProc("ResetEvent")
+ procPulseEvent = modkernel32.NewProc("PulseEvent")
+)
+
+// CreateEvent implements win32 CreateEventW func in golang. It will create an event object.
+func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) {
+ namep, _ := syscall.UTF16PtrFromString(name)
+ var _p1 uint32
+ if manualReset {
+ _p1 = 1
+ }
+ var _p2 uint32
+ if initialState {
+ _p2 = 1
+ }
+ r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
+ use(unsafe.Pointer(namep))
+ handle = syscall.Handle(r0)
+ if handle == syscall.InvalidHandle {
+ err = e1
+ }
+ return
+}
+
+// OpenEvent implements win32 OpenEventW func in golang. It opens an event object.
+func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) {
+ namep, _ := syscall.UTF16PtrFromString(name)
+ var _p1 uint32
+ if inheritHandle {
+ _p1 = 1
+ }
+ r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
+ use(unsafe.Pointer(namep))
+ handle = syscall.Handle(r0)
+ if handle == syscall.InvalidHandle {
+ err = e1
+ }
+ return
+}
+
+// SetEvent implements win32 SetEvent func in golang.
+func SetEvent(handle syscall.Handle) (err error) {
+ return setResetPulse(handle, procSetEvent)
+}
+
+// ResetEvent implements win32 ResetEvent func in golang.
+func ResetEvent(handle syscall.Handle) (err error) {
+ return setResetPulse(handle, procResetEvent)
+}
+
+// PulseEvent implements win32 PulseEvent func in golang.
+func PulseEvent(handle syscall.Handle) (err error) {
+ return setResetPulse(handle, procPulseEvent)
+}
+
+func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) {
+ r0, _, _ := proc.Call(uintptr(handle))
+ if r0 != 0 {
+ err = syscall.Errno(r0)
+ }
+ return
+}
+
+var temp unsafe.Pointer
+
+// use ensures a variable is kept alive, preventing the GC from freeing it while it is still needed
+func use(p unsafe.Pointer) {
+ temp = p
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go
new file mode 100644
index 000000000..c14feb849
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/filesys.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package system
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// MkdirAll creates a directory named path along with any necessary parents,
+// with permission specified by attribute perm for all dir created.
+func MkdirAll(path string, perm os.FileMode) error {
+ return os.MkdirAll(path, perm)
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs.
+func IsAbs(path string) bool {
+ return filepath.IsAbs(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
new file mode 100644
index 000000000..16823d551
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
@@ -0,0 +1,82 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "syscall"
+)
+
+// MkdirAll implementation that is volume path aware for Windows.
+func MkdirAll(path string, perm os.FileMode) error {
+ if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+ return nil
+ }
+
+ // The rest of this method is copied from os.MkdirAll and should be kept
+ // as-is to ensure compatibility.
+
+ // Fast path: if we can tell whether path is a directory or file, stop with success or error.
+ dir, err := os.Stat(path)
+ if err == nil {
+ if dir.IsDir() {
+ return nil
+ }
+ return &os.PathError{
+ Op: "mkdir",
+ Path: path,
+ Err: syscall.ENOTDIR,
+ }
+ }
+
+ // Slow path: make sure parent exists and then call Mkdir for path.
+ i := len(path)
+ for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+ i--
+ }
+
+ j := i
+ for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+ j--
+ }
+
+ if j > 1 {
+ // Create parent
+ err = MkdirAll(path[0:j-1], perm)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Parent now exists; invoke Mkdir and use its result.
+ err = os.Mkdir(path, perm)
+ if err != nil {
+ // Handle arguments like "foo/." by
+ // double-checking that directory doesn't exist.
+ dir, err1 := os.Lstat(path)
+ if err1 == nil && dir.IsDir() {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
+// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
+// because it doesn't start with a drive-letter/colon combination. However, in
+// docker we need to verify things such as WORKDIR /windows/system32 in
+// a Dockerfile (which gets translated to \windows\system32 when being processed
+// by the daemon). This SHOULD be treated as absolute from a docker processing
+// perspective.
+func IsAbs(path string) bool {
+ if !filepath.IsAbs(path) {
+ if !strings.HasPrefix(path, string(os.PathSeparator)) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lstat.go b/vendor/github.com/docker/docker/pkg/system/lstat.go
new file mode 100644
index 000000000..bd23c4d50
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lstat.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Lstat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Lstat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Lstat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
new file mode 100644
index 000000000..49e87eb40
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
@@ -0,0 +1,25 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+)
+
+// Lstat calls os.Lstat to get a fileinfo interface back.
+// This is then copied into our own locally defined structure.
+// Note the Linux version uses fromStatT to do the copy back,
+// but that is not strictly necessary when already in an OS specific module.
+func Lstat(path string) (*StatT, error) {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &StatT{
+ name: fi.Name(),
+ size: fi.Size(),
+ mode: fi.Mode(),
+ modTime: fi.ModTime(),
+ isDir: fi.IsDir()}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go
new file mode 100644
index 000000000..3b6e947e6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo.go
@@ -0,0 +1,17 @@
+package system
+
+// MemInfo contains memory statistics of the host system.
+type MemInfo struct {
+ // Total usable RAM (i.e. physical RAM minus a few reserved bits and the
+ // kernel binary code).
+ MemTotal int64
+
+ // Amount of free memory.
+ MemFree int64
+
+ // Total amount of swap space available.
+ SwapTotal int64
+
+ // Amount of swap space that is currently unused.
+ SwapFree int64
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
new file mode 100644
index 000000000..385f1d5e7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
@@ -0,0 +1,65 @@
+package system
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/docker/go-units"
+)
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ file, err := os.Open("/proc/meminfo")
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ return parseMemInfo(file)
+}
+
+// parseMemInfo parses the /proc/meminfo file into
+// a MemInfo object given an io.Reader to the file.
+// Throws an error if there are problems reading from the file
+func parseMemInfo(reader io.Reader) (*MemInfo, error) {
+ meminfo := &MemInfo{}
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ // Expected format: ["MemTotal:", "1234", "kB"]
+ parts := strings.Fields(scanner.Text())
+
+ // Sanity checks: Skip malformed entries.
+ if len(parts) < 3 || parts[2] != "kB" {
+ continue
+ }
+
+ // Convert to bytes.
+ size, err := strconv.Atoi(parts[1])
+ if err != nil {
+ continue
+ }
+ bytes := int64(size) * units.KiB
+
+ switch parts[0] {
+ case "MemTotal:":
+ meminfo.MemTotal = bytes
+ case "MemFree:":
+ meminfo.MemFree = bytes
+ case "SwapTotal:":
+ meminfo.SwapTotal = bytes
+ case "SwapFree:":
+ meminfo.SwapFree = bytes
+ }
+
+ }
+
+ // Handle errors that may have occurred during the reading of the file.
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return meminfo, nil
+}
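
A minimal usage sketch (illustrative only; Linux path shown). Values come back in bytes because the parser converts the kB fields from /proc/meminfo:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	mi, err := system.ReadMemInfo()
	if err != nil {
		panic(err)
	}
	// All fields are reported in bytes.
	fmt.Printf("mem: %d/%d free, swap: %d/%d free\n",
		mi.MemFree, mi.MemTotal, mi.SwapFree, mi.SwapTotal)
}
```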
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go
new file mode 100644
index 000000000..313c601b1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go
@@ -0,0 +1,128 @@
+// +build solaris,cgo
+
+package system
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+// #cgo LDFLAGS: -lkstat
+// #include <unistd.h>
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <kstat.h>
+// #include <sys/swap.h>
+// #include <sys/param.h>
+// struct swaptable *allocSwaptable(int num) {
+// struct swaptable *st;
+// struct swapent *swapent;
+// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
+// swapent = st->swt_ent;
+// for (int i = 0; i < num; i++,swapent++) {
+// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
+// }
+// st->swt_n = num;
+// return st;
+//}
+// void freeSwaptable (struct swaptable *st) {
+// struct swapent *swapent = st->swt_ent;
+// for (int i = 0; i < st->swt_n; i++,swapent++) {
+// free(swapent->ste_path);
+// }
+// free(st);
+// }
+// swapent_t getSwapEnt(swapent_t *ent, int i) {
+// return ent[i];
+// }
+// int64_t getPpKernel() {
+// int64_t pp_kernel = 0;
+// kstat_ctl_t *ksc;
+// kstat_t *ks;
+// kstat_named_t *knp;
+// kid_t kid;
+//
+// if ((ksc = kstat_open()) == NULL) {
+// return -1;
+// }
+// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
+// return -1;
+// }
+// if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
+// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
+// return -1;
+// }
+// switch (knp->data_type) {
+// case KSTAT_DATA_UINT64:
+// pp_kernel = knp->value.ui64;
+// break;
+// case KSTAT_DATA_UINT32:
+// pp_kernel = knp->value.ui32;
+// break;
+// }
+// pp_kernel *= sysconf(_SC_PAGESIZE);
+// return (pp_kernel > 0 ? pp_kernel : -1);
+// }
+import "C"
+
+// Get the system memory info using sysconf, the same as prtconf does
+func getTotalMem() int64 {
+ pagesize := C.sysconf(C._SC_PAGESIZE)
+ npages := C.sysconf(C._SC_PHYS_PAGES)
+ return int64(pagesize * npages)
+}
+
+func getFreeMem() int64 {
+ pagesize := C.sysconf(C._SC_PAGESIZE)
+ npages := C.sysconf(C._SC_AVPHYS_PAGES)
+ return int64(pagesize * npages)
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+
+ ppKernel := C.getPpKernel()
+ MemTotal := getTotalMem()
+ MemFree := getFreeMem()
+ SwapTotal, SwapFree, err := getSysSwap()
+
+ if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
+ SwapFree < 0 {
+ return nil, fmt.Errorf("Error getting system memory info %v\n", err)
+ }
+
+ meminfo := &MemInfo{}
+ // Total memory is total physical memory less the memory locked by the kernel
+ meminfo.MemTotal = MemTotal - int64(ppKernel)
+ meminfo.MemFree = MemFree
+ meminfo.SwapTotal = SwapTotal
+ meminfo.SwapFree = SwapFree
+
+ return meminfo, nil
+}
+
+func getSysSwap() (int64, int64, error) {
+ var tSwap int64
+ var fSwap int64
+ var diskblksPerPage int64
+ num, err := C.swapctl(C.SC_GETNSWP, nil)
+ if err != nil {
+ return -1, -1, err
+ }
+ st := C.allocSwaptable(num)
+ _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
+ if err != nil {
+ C.freeSwaptable(st)
+ return -1, -1, err
+ }
+
+ diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
+ for i := 0; i < int(num); i++ {
+ swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
+ tSwap += int64(swapent.ste_pages) * diskblksPerPage
+ fSwap += int64(swapent.ste_free) * diskblksPerPage
+ }
+ C.freeSwaptable(st)
+ return tSwap, fSwap, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
new file mode 100644
index 000000000..3ce019dff
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux,!windows,!solaris
+
+package system
+
+// ReadMemInfo is not supported on platforms other than linux, windows and solaris.
+func ReadMemInfo() (*MemInfo, error) {
+ return nil, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
new file mode 100644
index 000000000..d46642598
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
@@ -0,0 +1,44 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+ procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx
+type memorystatusex struct {
+ dwLength uint32
+ dwMemoryLoad uint32
+ ullTotalPhys uint64
+ ullAvailPhys uint64
+ ullTotalPageFile uint64
+ ullAvailPageFile uint64
+ ullTotalVirtual uint64
+ ullAvailVirtual uint64
+ ullAvailExtendedVirtual uint64
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ msi := &memorystatusex{
+ dwLength: 64,
+ }
+ r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi)))
+ if r1 == 0 {
+ return &MemInfo{}, nil
+ }
+ return &MemInfo{
+ MemTotal: int64(msi.ullTotalPhys),
+ MemFree: int64(msi.ullAvailPhys),
+ SwapTotal: int64(msi.ullTotalPageFile),
+ SwapFree: int64(msi.ullAvailPageFile),
+ }, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go
new file mode 100644
index 000000000..73958182b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/mknod.go
@@ -0,0 +1,22 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev.
+func Mknod(path string, mode uint32, dev int) error {
+ return syscall.Mknod(path, mode, dev)
+}
+
+// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
+// and minor number of the newly created device special file.
+// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
+// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
+// then the top 12 bits of the minor.
+func Mkdev(major int64, minor int64) uint32 {
+ return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
+}
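
To make the bit layout described in the Mkdev comment concrete, a small sketch (assumption: run on a non-Windows platform, since the Windows variant panics). For example, /dev/sda1 on Linux is major 8, minor 1, which packs to 0x801 (2049):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// major 8, minor 1 (/dev/sda1 on Linux):
	// ((1 & 0xfff00) << 12) | ((8 & 0xfff) << 8) | (1 & 0xff) = 0x801
	fmt.Printf("0x%x\n", system.Mkdev(8, 1)) // 0x801
}
```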
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
new file mode 100644
index 000000000..2e863c021
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
@@ -0,0 +1,13 @@
+// +build windows
+
+package system
+
+// Mknod is not implemented on Windows.
+func Mknod(path string, mode uint32, dev int) error {
+ return ErrNotSupportedPlatform
+}
+
+// Mkdev is not implemented on Windows.
+func Mkdev(major int64, minor int64) uint32 {
+ panic("Mkdev not implemented on Windows.")
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go
new file mode 100644
index 000000000..c607c4db0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go
@@ -0,0 +1,14 @@
+// +build !windows
+
+package system
+
+// DefaultPathEnv is the Unix-style list of directories to search for
+// executables. Each directory is separated from the next by a colon
+// ':' character.
+const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive. This is a no-op on Linux.
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ return path, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go
new file mode 100644
index 000000000..cbfe2c157
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go
@@ -0,0 +1,37 @@
+// +build windows
+
+package system
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+)
+
+// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
+// the container. Docker has no context of what the default path should be.
+const DefaultPathEnv = ""
+
+// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
+// This is used, for example, when validating a user provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed. Also, it translates it to OS semantics (IOW / to \). We
+// need the path in this syntax so that it can ultimately be concatenated with
+// a Windows long-path which doesn't support drive-letters. Examples:
+// C: --> Fail
+// C:\ --> \
+// a --> a
+// /a --> \a
+// d:\ --> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ if len(path) == 2 && string(path[1]) == ":" {
+ return "", fmt.Errorf("No relative path specified in %q", path)
+ }
+ if !filepath.IsAbs(path) || len(path) < 2 {
+ return filepath.FromSlash(path), nil
+ }
+ if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+ return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+ }
+ return filepath.FromSlash(path[2:]), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat.go b/vendor/github.com/docker/docker/pkg/system/stat.go
new file mode 100644
index 000000000..087034c5e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat.go
@@ -0,0 +1,53 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like permission, owner, group, size, etc about a file.
+type StatT struct {
+ mode uint32
+ uid uint32
+ gid uint32
+ rdev uint64
+ size int64
+ mtim syscall.Timespec
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() uint32 {
+ return s.mode
+}
+
+// UID returns file's user id of owner.
+func (s StatT) UID() uint32 {
+ return s.uid
+}
+
+// GID returns file's group id of owner.
+func (s StatT) GID() uint32 {
+ return s.gid
+}
+
+// Rdev returns file's device ID (if it's a special file).
+func (s StatT) Rdev() uint64 {
+ return s.rdev
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+ return s.size
+}
+
+// Mtim returns file's last modification time.
+func (s StatT) Mtim() syscall.Timespec {
+ return s.mtim
+}
+
+// GetLastModification returns file's last modification time.
+func (s StatT) GetLastModification() syscall.Timespec {
+ return s.Mtim()
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
new file mode 100644
index 000000000..d0fb6f151
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
@@ -0,0 +1,27 @@
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Stat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
new file mode 100644
index 000000000..8b1eded13
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: s.Mode,
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: s.Rdev,
+ mtim: s.Mtim}, nil
+}
+
+// FromStatT exists only on linux, and loads a system.StatT from a
+// syscall.Stat_t.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return fromStatT(s)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Stat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
new file mode 100644
index 000000000..3c3b71fb2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
@@ -0,0 +1,15 @@
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtim}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
new file mode 100644
index 000000000..0216985a2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
@@ -0,0 +1,34 @@
+// +build solaris
+
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtim}, nil
+}
+
+// FromStatT loads a system.StatT from a syscall.Stat_t.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return fromStatT(s)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Stat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go
new file mode 100644
index 000000000..f53e9de4d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go
@@ -0,0 +1,17 @@
+// +build !linux,!windows,!freebsd,!solaris,!openbsd
+
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go
new file mode 100644
index 000000000..39490c625
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go
@@ -0,0 +1,43 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "time"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like name, permission, size, etc about a file.
+type StatT struct {
+ name string
+ size int64
+ mode os.FileMode
+ modTime time.Time
+ isDir bool
+}
+
+// Name returns file's name.
+func (s StatT) Name() string {
+ return s.name
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+ return s.size
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() os.FileMode {
+ return s.mode
+}
+
+// ModTime returns file's last modification time.
+func (s StatT) ModTime() time.Time {
+ return s.modTime
+}
+
+// IsDir returns whether file is actually a directory.
+func (s StatT) IsDir() bool {
+ return s.isDir
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
new file mode 100644
index 000000000..3ae912846
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
@@ -0,0 +1,17 @@
+// +build linux freebsd
+
+package system
+
+import "syscall"
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall.
+func Unmount(dest string) error {
+ return syscall.Unmount(dest, 0)
+}
+
+// CommandLineToArgv should not be used on Unix.
+// It simply returns commandLine as the only element of the returned array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+ return []string{commandLine}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
new file mode 100644
index 000000000..f5f2d5694
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
@@ -0,0 +1,103 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+
+ "github.com/Sirupsen/logrus"
+)
+
+var (
+ ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
+ procGetVersionExW = modkernel32.NewProc("GetVersionExW")
+)
+
+// OSVersion is a wrapper for Windows version information
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
+type OSVersion struct {
+ Version uint32
+ MajorVersion uint8
+ MinorVersion uint8
+ Build uint16
+}
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
+type osVersionInfoEx struct {
+ OSVersionInfoSize uint32
+ MajorVersion uint32
+ MinorVersion uint32
+ BuildNumber uint32
+ PlatformID uint32
+ CSDVersion [128]uint16
+ ServicePackMajor uint16
+ ServicePackMinor uint16
+ SuiteMask uint16
+ ProductType byte
+ Reserve byte
+}
+
+// GetOSVersion gets the operating system version on Windows. Note that
+// docker.exe must be manifested to get the correct version information.
+func GetOSVersion() OSVersion {
+ var err error
+ osv := OSVersion{}
+ osv.Version, err = syscall.GetVersion()
+ if err != nil {
+ // GetVersion never fails.
+ panic(err)
+ }
+ osv.MajorVersion = uint8(osv.Version & 0xFF)
+ osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
+ osv.Build = uint16(osv.Version >> 16)
+ return osv
+}
+
+// IsWindowsClient returns true if the SKU is client
+func IsWindowsClient() bool {
+ osviex := &osVersionInfoEx{OSVersionInfoSize: 284}
+ r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
+ if r1 == 0 {
+ logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err)
+ return false
+ }
+ const verNTWorkstation = 0x00000001
+ return osviex.ProductType == verNTWorkstation
+}
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall. Not supported on Windows
+func Unmount(dest string) error {
+ return nil
+}
+
+// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+ var argc int32
+
+ argsPtr, err := syscall.UTF16PtrFromString(commandLine)
+ if err != nil {
+ return nil, err
+ }
+
+ argv, err := syscall.CommandLineToArgv(argsPtr, &argc)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv))))
+
+ newArgs := make([]string, argc)
+ for i, v := range (*argv)[:argc] {
+ newArgs[i] = string(syscall.UTF16ToString((*v)[:]))
+ }
+
+ return newArgs, nil
+}
+
+// HasWin32KSupport determines whether containers that depend on win32k can
+// run on this machine. Win32k is the driver used to implement windowing.
+func HasWin32KSupport() bool {
+ // For now, check for ntuser API support on the host. In the future, a host
+ // may support win32k in containers even if the host does not support ntuser
+ // APIs.
+ return ntuserApiset.Load() == nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go
new file mode 100644
index 000000000..3d0146b01
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/umask.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Umask sets current process's file mode creation mask to newmask
+// and returns oldmask.
+func Umask(newmask int) (oldmask int, err error) {
+ return syscall.Umask(newmask), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go
new file mode 100644
index 000000000..13f1de176
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/umask_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package system
+
+// Umask is not supported on the windows platform.
+func Umask(newmask int) (oldmask int, err error) {
+ // should not be called on cli code path
+ return 0, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_darwin.go b/vendor/github.com/docker/docker/pkg/system/utimes_darwin.go
new file mode 100644
index 000000000..0a1619754
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_darwin.go
@@ -0,0 +1,8 @@
+package system
+
+import "syscall"
+
+// LUtimesNano is not supported on the darwin platform.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
new file mode 100644
index 000000000..e2eac3b55
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
@@ -0,0 +1,22 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symlink files because syscall.UtimesNano doesn't support a NOFOLLOW flag atm.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ var _path *byte
+ _path, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
new file mode 100644
index 000000000..fc8a1aba9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
@@ -0,0 +1,26 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symlink files because syscall.UtimesNano doesn't support a NOFOLLOW flag atm.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ // These are not currently available in syscall
+ atFdCwd := -100
+ atSymLinkNoFollow := 0x100
+
+ var _path *byte
+ _path, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
new file mode 100644
index 000000000..50c3a0436
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!freebsd,!darwin
+
+package system
+
+import "syscall"
+
+// LUtimesNano is not supported on platforms other than linux, freebsd and darwin.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
new file mode 100644
index 000000000..d2e2c0579
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -0,0 +1,63 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It will return a nil slice and nil error if the xattr is not set.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return nil, err
+ }
+
+ dest := make([]byte, 128)
+ destBytes := unsafe.Pointer(&dest[0])
+ sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ if errno == syscall.ENODATA {
+ return nil, nil
+ }
+ if errno == syscall.ERANGE {
+ dest = make([]byte, sz)
+ destBytes := unsafe.Pointer(&dest[0])
+ sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ }
+ if errno != 0 {
+ return nil, errno
+ }
+
+ return dest[:sz], nil
+}
+
+var _zero uintptr
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return err
+ }
+ var dataBytes unsafe.Pointer
+ if len(data) > 0 {
+ dataBytes = unsafe.Pointer(&data[0])
+ } else {
+ dataBytes = unsafe.Pointer(&_zero)
+ }
+ _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
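
A hedged round-trip sketch of the two wrappers above (Linux only; assumes the filesystem backing the temp directory supports user extended attributes):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/docker/docker/pkg/system"
)

func main() {
	f, err := ioutil.TempFile("", "xattr-demo")
	if err != nil {
		panic(err)
	}
	f.Close()
	defer os.Remove(f.Name())

	// Set and read back a user extended attribute via the raw syscall wrappers.
	if err := system.Lsetxattr(f.Name(), "user.demo", []byte("value"), 0); err != nil {
		panic(err)
	}
	val, err := system.Lgetxattr(f.Name(), "user.demo")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(val)) // value
}
```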
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
new file mode 100644
index 000000000..0114f2227
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package system
+
+// Lgetxattr is not supported on platforms other than linux.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ return nil, ErrNotSupportedPlatform
+}
+
+// Lsetxattr is not supported on platforms other than linux.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md
new file mode 100644
index 000000000..9ea86d784
--- /dev/null
+++ b/vendor/github.com/docker/go-units/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# Contributing to go-units
+
+Want to hack on go-units? Awesome! Here are instructions to get you started.
+
+go-units is a part of the [Docker](https://www.docker.com) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read Docker's
+[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
+[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
+[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
+[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
diff --git a/vendor/github.com/docker/go-units/LICENSE.code b/vendor/github.com/docker/go-units/LICENSE.code
new file mode 100644
index 000000000..b55b37bc3
--- /dev/null
+++ b/vendor/github.com/docker/go-units/LICENSE.code
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/go-units/LICENSE.docs b/vendor/github.com/docker/go-units/LICENSE.docs
new file mode 100644
index 000000000..e26cd4fc8
--- /dev/null
+++ b/vendor/github.com/docker/go-units/LICENSE.docs
@@ -0,0 +1,425 @@
+Attribution-ShareAlike 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+ Considerations for licensors: Our public licenses are
+ intended for use by those authorized to give the public
+ permission to use material in ways otherwise restricted by
+ copyright and certain other rights. Our licenses are
+ irrevocable. Licensors should read and understand the terms
+ and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before
+ applying our licenses so that the public can reuse the
+ material as expected. Licensors should clearly mark any
+ material not subject to the license. This includes other CC-
+ licensed material, or material used under an exception or
+ limitation to copyright. More considerations for licensors:
+ wiki.creativecommons.org/Considerations_for_licensors
+
+ Considerations for the public: By using one of our public
+ licenses, a licensor grants the public permission to use the
+ licensed material under specified terms and conditions. If
+ the licensor's permission is not necessary for any reason--for
+ example, because of any applicable exception or limitation to
+ copyright--then that use is not regulated by the license. Our
+ licenses grant only permissions under copyright and certain
+ other rights that a licensor has authority to grant. Use of
+ the licensed material may still be restricted for other
+ reasons, including because others have copyright or other
+ rights in the material. A licensor may make special requests,
+ such as asking that all changes be marked or described.
+ Although not required by our licenses, you are encouraged to
+ respect those requests where reasonable. More_considerations
+ for the public:
+ wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution-ShareAlike 4.0 International Public
+License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution-ShareAlike 4.0 International Public License ("Public
+License"). To the extent this Public License may be interpreted as a
+contract, You are granted the Licensed Rights in consideration of Your
+acceptance of these terms and conditions, and the Licensor grants You
+such rights in consideration of benefits the Licensor receives from
+making the Licensed Material available under these terms and
+conditions.
+
+
+Section 1 -- Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar
+ Rights that is derived from or based upon the Licensed Material
+ and in which the Licensed Material is translated, altered,
+ arranged, transformed, or otherwise modified in a manner requiring
+ permission under the Copyright and Similar Rights held by the
+ Licensor. For purposes of this Public License, where the Licensed
+ Material is a musical work, performance, or sound recording,
+ Adapted Material is always produced where the Licensed Material is
+ synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright
+ and Similar Rights in Your contributions to Adapted Material in
+ accordance with the terms and conditions of this Public License.
+
+ c. BY-SA Compatible License means a license listed at
+ creativecommons.org/compatiblelicenses, approved by Creative
+ Commons as essentially the equivalent of this Public License.
+
+ d. Copyright and Similar Rights means copyright and/or similar rights
+ closely related to copyright including, without limitation,
+ performance, broadcast, sound recording, and Sui Generis Database
+ Rights, without regard to how the rights are labeled or
+ categorized. For purposes of this Public License, the rights
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
+ Rights.
+
+ e. Effective Technological Measures means those measures that, in the
+ absence of proper authority, may not be circumvented under laws
+ fulfilling obligations under Article 11 of the WIPO Copyright
+ Treaty adopted on December 20, 1996, and/or similar international
+ agreements.
+
+ f. Exceptions and Limitations means fair use, fair dealing, and/or
+ any other exception or limitation to Copyright and Similar Rights
+ that applies to Your use of the Licensed Material.
+
+ g. License Elements means the license attributes listed in the name
+ of a Creative Commons Public License. The License Elements of this
+ Public License are Attribution and ShareAlike.
+
+ h. Licensed Material means the artistic or literary work, database,
+ or other material to which the Licensor applied this Public
+ License.
+
+ i. Licensed Rights means the rights granted to You subject to the
+ terms and conditions of this Public License, which are limited to
+ all Copyright and Similar Rights that apply to Your use of the
+ Licensed Material and that the Licensor has authority to license.
+
+ j. Licensor means the individual(s) or entity(ies) granting rights
+ under this Public License.
+
+ k. Share means to provide material to the public by any means or
+ process that requires permission under the Licensed Rights, such
+ as reproduction, public display, public performance, distribution,
+ dissemination, communication, or importation, and to make material
+ available to the public including in ways that members of the
+ public may access the material from a place and at a time
+ individually chosen by them.
+
+ l. Sui Generis Database Rights means rights other than copyright
+ resulting from Directive 96/9/EC of the European Parliament and of
+ the Council of 11 March 1996 on the legal protection of databases,
+ as amended and/or succeeded, as well as other essentially
+ equivalent rights anywhere in the world.
+
+ m. You means the individual or entity exercising the Licensed Rights
+ under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License,
+ the Licensor hereby grants You a worldwide, royalty-free,
+ non-sublicensable, non-exclusive, irrevocable license to
+ exercise the Licensed Rights in the Licensed Material to:
+
+ a. reproduce and Share the Licensed Material, in whole or
+ in part; and
+
+ b. produce, reproduce, and Share Adapted Material.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
+ Exceptions and Limitations apply to Your use, this Public
+ License does not apply, and You do not need to comply with
+ its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section
+ 6(a).
+
+ 4. Media and formats; technical modifications allowed. The
+ Licensor authorizes You to exercise the Licensed Rights in
+ all media and formats whether now known or hereafter created,
+ and to make technical modifications necessary to do so. The
+ Licensor waives and/or agrees not to assert any right or
+ authority to forbid You from making technical modifications
+ necessary to exercise the Licensed Rights, including
+ technical modifications necessary to circumvent Effective
+ Technological Measures. For purposes of this Public License,
+ simply making modifications authorized by this Section 2(a)
+ (4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ a. Offer from the Licensor -- Licensed Material. Every
+ recipient of the Licensed Material automatically
+ receives an offer from the Licensor to exercise the
+ Licensed Rights under the terms and conditions of this
+ Public License.
+
+ b. Additional offer from the Licensor -- Adapted Material.
+ Every recipient of Adapted Material from You
+ automatically receives an offer from the Licensor to
+ exercise the Licensed Rights in the Adapted Material
+ under the conditions of the Adapter's License You apply.
+
+ c. No downstream restrictions. You may not offer or impose
+ any additional or different terms or conditions on, or
+ apply any Effective Technological Measures to, the
+ Licensed Material if doing so restricts exercise of the
+ Licensed Rights by any recipient of the Licensed
+ Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or
+ may be construed as permission to assert or imply that You
+ are, or that Your use of the Licensed Material is, connected
+ with, or sponsored, endorsed, or granted official status by,
+ the Licensor or others designated to receive attribution as
+ provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not
+ licensed under this Public License, nor are publicity,
+ privacy, and/or other similar personality rights; however, to
+ the extent possible, the Licensor waives and/or agrees not to
+ assert any such rights held by the Licensor to the limited
+ extent necessary to allow You to exercise the Licensed
+ Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this
+ Public License.
+
+ 3. To the extent possible, the Licensor waives any right to
+ collect royalties from You for the exercise of the Licensed
+ Rights, whether directly or through a collecting society
+ under any voluntary or waivable statutory or compulsory
+ licensing scheme. In all other cases the Licensor expressly
+ reserves any right to collect such royalties.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified
+ form), You must:
+
+ a. retain the following if it is supplied by the Licensor
+ with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed
+ Material and any others designated to receive
+ attribution, in any reasonable manner requested by
+ the Licensor (including by pseudonym if
+ designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of
+ warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the
+ extent reasonably practicable;
+
+ b. indicate if You modified the Licensed Material and
+ retain an indication of any previous modifications; and
+
+ c. indicate the Licensed Material is licensed under this
+ Public License, and include the text of, or the URI or
+ hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
+ reasonable manner based on the medium, means, and context in
+ which You Share the Licensed Material. For example, it may be
+ reasonable to satisfy the conditions by providing a URI or
+ hyperlink to a resource that includes the required
+ information.
+
+ 3. If requested by the Licensor, You must remove any of the
+ information required by Section 3(a)(1)(A) to the extent
+ reasonably practicable.
+
+ b. ShareAlike.
+
+ In addition to the conditions in Section 3(a), if You Share
+ Adapted Material You produce, the following conditions also apply.
+
+ 1. The Adapter's License You apply must be a Creative Commons
+ license with the same License Elements, this version or
+ later, or a BY-SA Compatible License.
+
+ 2. You must include the text of, or the URI or hyperlink to, the
+ Adapter's License You apply. You may satisfy this condition
+ in any reasonable manner based on the medium, means, and
+ context in which You Share Adapted Material.
+
+ 3. You may not offer or impose any additional or different terms
+ or conditions on, or apply any Effective Technological
+ Measures to, Adapted Material that restrict exercise of the
+ rights granted under the Adapter's License You apply.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+ to extract, reuse, reproduce, and Share all or a substantial
+ portion of the contents of the database;
+
+ b. if You include all or a substantial portion of the database
+ contents in a database in which You have Sui Generis Database
+ Rights, then the database in which You have Sui Generis Database
+ Rights (but not its individual contents) is Adapted Material,
+ including for purposes of Section 3(b); and
+
+ c. You must comply with the conditions in Section 3(a) if You Share
+ all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+ c. The disclaimer of warranties and limitation of liability provided
+ above shall be interpreted in a manner that, to the extent
+ possible, most closely approximates an absolute disclaimer and
+ waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and
+ Similar Rights licensed here. However, if You fail to comply with
+ this Public License, then Your rights under this Public License
+ terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under
+ Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided
+ it is cured within 30 days of Your discovery of the
+ violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any
+ right the Licensor may have to seek remedies for Your violations
+ of this Public License.
+
+ c. For the avoidance of doubt, the Licensor may also offer the
+ Licensed Material under separate terms or conditions or stop
+ distributing the Licensed Material at any time; however, doing so
+ will not terminate this Public License.
+
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+ License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different
+ terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the
+ Licensed Material not stated herein are separate from and
+ independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and
+ shall not be interpreted to, reduce, limit, restrict, or impose
+ conditions on any use of the Licensed Material that could lawfully
+ be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is
+ deemed unenforceable, it shall be automatically reformed to the
+ minimum extent necessary to make it enforceable. If the provision
+ cannot be reformed, it shall be severed from this Public License
+ without affecting the enforceability of the remaining terms and
+ conditions.
+
+ c. No term or condition of this Public License will be waived and no
+ failure to comply consented to unless expressly agreed to by the
+ Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted
+ as a limitation upon, or waiver of, any privileges and immunities
+ that apply to the Licensor or You, including from the legal
+ processes of any jurisdiction or authority.
+
+
+=======================================================================
+
+Creative Commons is not a party to its public licenses.
+Notwithstanding, Creative Commons may elect to apply one of its public
+licenses to material it publishes and in those instances will be
+considered the "Licensor." Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS
new file mode 100644
index 000000000..477be8b21
--- /dev/null
+++ b/vendor/github.com/docker/go-units/MAINTAINERS
@@ -0,0 +1,27 @@
+# go-units maintainers file
+#
+# This file describes who runs the docker/go-units project and how.
+# This is a living document - if you see something out of date or missing, speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant parser.
+#
+# This file is compiled into the MAINTAINERS file in docker/opensource.
+#
+[Org]
+ [Org."Core maintainers"]
+ people = [
+ "calavera",
+ ]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+ # ADD YOURSELF HERE IN ALPHABETICAL ORDER
+ [people.calavera]
+ Name = "David Calavera"
+ Email = "david.calavera@gmail.com"
+ GitHub = "calavera"
diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md
new file mode 100644
index 000000000..3ce4d79da
--- /dev/null
+++ b/vendor/github.com/docker/go-units/README.md
@@ -0,0 +1,18 @@
+[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units)
+
+# Introduction
+
+go-units is a library to transform human friendly measurements into machine friendly values.
+
+## Usage
+
+See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
+
+## Copyright and license
+
+Copyright © 2015 Docker, Inc. All rights reserved, except as follows. Code
+is released under the Apache 2.0 license. The README.md file, and files in the
+"docs" folder are licensed under the Creative Commons Attribution 4.0
+International License under the terms and conditions set forth in the file
+"LICENSE.docs". You may obtain a duplicate copy of the same license, titled
+CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
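
As a quick orientation for this newly vendored dependency, here is a small, illustrative sketch (not part of the vendored code) of the parsing direction the README describes, using FromHumanSize and RAMInBytes from size.go below; the example program and its values are my own.

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// SI (decimal) parsing: "128MB" -> 128 * 1000 * 1000 bytes.
	n, err := units.FromHumanSize("128MB")
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 128000000

	// Binary parsing, as used for RAM-style values: "64m" -> 64 MiB.
	ram, err := units.RAMInBytes("64m")
	if err != nil {
		panic(err)
	}
	fmt.Println(ram) // 67108864
}
```

FromHumanSize uses decimal (SI) multipliers while RAMInBytes uses binary ones, which is why the same-looking suffix yields different byte counts.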
diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml
new file mode 100644
index 000000000..9043b3547
--- /dev/null
+++ b/vendor/github.com/docker/go-units/circle.yml
@@ -0,0 +1,11 @@
+dependencies:
+ post:
+ # install golint
+ - go get github.com/golang/lint/golint
+
+test:
+ pre:
+ # run analysis before tests
+ - go vet ./...
+ - test -z "$(golint ./... | tee /dev/stderr)"
+ - test -z "$(gofmt -s -l . | tee /dev/stderr)"
diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go
new file mode 100644
index 000000000..c219a8a96
--- /dev/null
+++ b/vendor/github.com/docker/go-units/duration.go
@@ -0,0 +1,33 @@
+// Package units provides helper function to parse and print size and time units
+// in human-readable format.
+package units
+
+import (
+ "fmt"
+ "time"
+)
+
+// HumanDuration returns a human-readable approximation of a duration
+// (eg. "About a minute", "4 hours", etc.).
+func HumanDuration(d time.Duration) string {
+ if seconds := int(d.Seconds()); seconds < 1 {
+ return "Less than a second"
+ } else if seconds < 60 {
+ return fmt.Sprintf("%d seconds", seconds)
+ } else if minutes := int(d.Minutes()); minutes == 1 {
+ return "About a minute"
+ } else if minutes < 60 {
+ return fmt.Sprintf("%d minutes", minutes)
+ } else if hours := int(d.Hours()); hours == 1 {
+ return "About an hour"
+ } else if hours < 48 {
+ return fmt.Sprintf("%d hours", hours)
+ } else if hours < 24*7*2 {
+ return fmt.Sprintf("%d days", hours/24)
+ } else if hours < 24*30*3 {
+ return fmt.Sprintf("%d weeks", hours/24/7)
+ } else if hours < 24*365*2 {
+ return fmt.Sprintf("%d months", hours/24/30)
+ }
+ return fmt.Sprintf("%d years", int(d.Hours())/24/365)
+}
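
The threshold ladder in HumanDuration is easiest to read with a few sample inputs. The following snippet is purely illustrative; the durations and expected strings are worked out from the code above, not taken from upstream docs.

```go
package main

import (
	"fmt"
	"time"

	units "github.com/docker/go-units"
)

func main() {
	samples := []time.Duration{
		500 * time.Millisecond,   // "Less than a second"
		45 * time.Second,         // "45 seconds"
		90 * time.Second,         // "About a minute"
		30 * time.Minute,         // "30 minutes"
		90 * time.Minute,         // "About an hour"
		36 * time.Hour,           // "36 hours" (anything under 48h stays in hours)
		10 * 24 * time.Hour,      // "10 days"
		6 * 7 * 24 * time.Hour,   // "6 weeks"
		5 * 30 * 24 * time.Hour,  // "5 months"
		3 * 365 * 24 * time.Hour, // "3 years"
	}
	for _, d := range samples {
		fmt.Printf("%-12s -> %s\n", d, units.HumanDuration(d))
	}
}
```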
diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go
new file mode 100644
index 000000000..989edd29b
--- /dev/null
+++ b/vendor/github.com/docker/go-units/size.go
@@ -0,0 +1,95 @@
+package units
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// See: http://en.wikipedia.org/wiki/Binary_prefix
+const (
+ // Decimal
+
+ KB = 1000
+ MB = 1000 * KB
+ GB = 1000 * MB
+ TB = 1000 * GB
+ PB = 1000 * TB
+
+ // Binary
+
+ KiB = 1024
+ MiB = 1024 * KiB
+ GiB = 1024 * MiB
+ TiB = 1024 * GiB
+ PiB = 1024 * TiB
+)
+
+type unitMap map[string]int64
+
+var (
+ decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
+ binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
+ sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)
+)
+
+var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+
+// CustomSize returns a human-readable approximation of a size
+// using custom format.
+func CustomSize(format string, size float64, base float64, _map []string) string {
+ i := 0
+ for size >= base {
+ size = size / base
+ i++
+ }
+ return fmt.Sprintf(format, size, _map[i])
+}
+
+// HumanSize returns a human-readable approximation of a size
+// capped at 4 valid numbers (eg. "2.746 MB", "796 KB").
+func HumanSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs)
+}
+
+// BytesSize returns a human-readable size in bytes, kibibytes,
+// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB").
+func BytesSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
+}
+
+// FromHumanSize returns an integer from a human-readable specification of a
+// size using SI standard (eg. "44kB", "17MB").
+func FromHumanSize(size string) (int64, error) {
+ return parseSize(size, decimalMap)
+}
+
+// RAMInBytes parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
+// returns the number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
+func RAMInBytes(size string) (int64, error) {
+ return parseSize(size, binaryMap)
+}
+
+// Parses the human-readable size string into the amount it represents.
+func parseSize(sizeStr string, uMap unitMap) (int64, error) {
+ matches := sizeRegex.FindStringSubmatch(sizeStr)
+ if len(matches) != 4 {
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+
+ size, err := strconv.ParseFloat(matches[1], 64)
+ if err != nil {
+ return -1, err
+ }
+
+ unitPrefix := strings.ToLower(matches[3])
+ if mul, ok := uMap[unitPrefix]; ok {
+ size *= float64(mul)
+ }
+
+ return int64(size), nil
+}
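
For the formatting direction, a short hedged example: HumanSize and BytesSize pick decimal or binary multipliers respectively, and CustomSize exposes the same loop with a caller-chosen format string and suffix table. The suffix slice for transfer rates below is my own invention, not something shipped by the package.

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// Decimal (SI) and binary renderings of comparable byte counts.
	fmt.Println(units.HumanSize(1536 * 1000)) // "1.536 MB"
	fmt.Println(units.BytesSize(1536 * 1024)) // "1.5 MiB"

	// CustomSize lets the caller pick the format string, base, and suffixes.
	speeds := []string{"B/s", "kB/s", "MB/s", "GB/s"}
	fmt.Println(units.CustomSize("%.1f %s", 2500000, 1000.0, speeds)) // "2.5 MB/s"
}
```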
diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go
new file mode 100644
index 000000000..5ac7fd825
--- /dev/null
+++ b/vendor/github.com/docker/go-units/ulimit.go
@@ -0,0 +1,118 @@
+package units
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Ulimit is a human friendly version of Rlimit.
+type Ulimit struct {
+ Name string
+ Hard int64
+ Soft int64
+}
+
+// Rlimit specifies the resource limits, such as max open files.
+type Rlimit struct {
+ Type int `json:"type,omitempty"`
+ Hard uint64 `json:"hard,omitempty"`
+ Soft uint64 `json:"soft,omitempty"`
+}
+
+const (
+ // magic numbers for making the syscall
+ // some of these are defined in the syscall package, but not all.
+ // Also since Windows client doesn't get access to the syscall package, need to
+ // define these here
+ rlimitAs = 9
+ rlimitCore = 4
+ rlimitCPU = 0
+ rlimitData = 2
+ rlimitFsize = 1
+ rlimitLocks = 10
+ rlimitMemlock = 8
+ rlimitMsgqueue = 12
+ rlimitNice = 13
+ rlimitNofile = 7
+ rlimitNproc = 6
+ rlimitRss = 5
+ rlimitRtprio = 14
+ rlimitRttime = 15
+ rlimitSigpending = 11
+ rlimitStack = 3
+)
+
+var ulimitNameMapping = map[string]int{
+ //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
+ "core": rlimitCore,
+ "cpu": rlimitCPU,
+ "data": rlimitData,
+ "fsize": rlimitFsize,
+ "locks": rlimitLocks,
+ "memlock": rlimitMemlock,
+ "msgqueue": rlimitMsgqueue,
+ "nice": rlimitNice,
+ "nofile": rlimitNofile,
+ "nproc": rlimitNproc,
+ "rss": rlimitRss,
+ "rtprio": rlimitRtprio,
+ "rttime": rlimitRttime,
+ "sigpending": rlimitSigpending,
+ "stack": rlimitStack,
+}
+
+// ParseUlimit parses and returns a Ulimit from the specified string.
+func ParseUlimit(val string) (*Ulimit, error) {
+ parts := strings.SplitN(val, "=", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("invalid ulimit argument: %s", val)
+ }
+
+ if _, exists := ulimitNameMapping[parts[0]]; !exists {
+ return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
+ }
+
+ var (
+ soft int64
+ hard = &soft // default to soft in case no hard was set
+ temp int64
+ err error
+ )
+ switch limitVals := strings.Split(parts[1], ":"); len(limitVals) {
+ case 2:
+ temp, err = strconv.ParseInt(limitVals[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ hard = &temp
+ fallthrough
+ case 1:
+ soft, err = strconv.ParseInt(limitVals[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
+ }
+
+ if soft > *hard {
+ return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
+ }
+
+ return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
+}
+
+// GetRlimit returns the RLimit corresponding to Ulimit.
+func (u *Ulimit) GetRlimit() (*Rlimit, error) {
+ t, exists := ulimitNameMapping[u.Name]
+ if !exists {
+ return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
+ }
+
+ return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
+}
+
+func (u *Ulimit) String() string {
+ return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
+}
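
A brief illustration of the ulimit helpers above; the "nofile" values are arbitrary examples, and the printed rlimit type (7) simply reflects the rlimitNofile constant defined in this file.

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// "soft" only: the hard limit defaults to the same value.
	u, err := units.ParseUlimit("nofile=1024")
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // nofile=1024:1024

	// Explicit soft:hard pair.
	u, err = units.ParseUlimit("nofile=1024:2048")
	if err != nil {
		panic(err)
	}

	// Convert to the syscall-shaped struct.
	rl, err := u.GetRlimit()
	if err != nil {
		panic(err)
	}
	fmt.Printf("type=%d soft=%d hard=%d\n", rl.Type, rl.Soft, rl.Hard) // type=7 soft=1024 hard=2048
}
```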
diff --git a/vendor/github.com/fsouza/go-dockerclient/.gitignore b/vendor/github.com/fsouza/go-dockerclient/.gitignore
deleted file mode 100644
index 5f6b48eae..000000000
--- a/vendor/github.com/fsouza/go-dockerclient/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-# temporary symlink for testing
-testing/data/symlink
diff --git a/vendor/github.com/fsouza/go-dockerclient/.travis.yml b/vendor/github.com/fsouza/go-dockerclient/.travis.yml
deleted file mode 100644
index 68b137ad2..000000000
--- a/vendor/github.com/fsouza/go-dockerclient/.travis.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-language: go
-sudo: required
-go:
- - 1.4.2
- - 1.5.3
- - 1.6
- - tip
-os:
- - linux
- - osx
-env:
- - GOARCH=amd64 DOCKER_VERSION=1.8.3
- - GOARCH=386 DOCKER_VERSION=1.8.3
- - GOARCH=amd64 DOCKER_VERSION=1.9.1
- - GOARCH=386 DOCKER_VERSION=1.9.1
- - GOARCH=amd64 DOCKER_VERSION=1.10.3
- - GOARCH=386 DOCKER_VERSION=1.10.3
-install:
- - travis_retry travis-scripts/install.bash
-script:
- - travis-scripts/run-tests.bash
-services:
- - docker
-matrix:
- fast_finish: true
- allow_failures:
- - go: tip
diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS
index bb71cc345..a874f82af 100644
--- a/vendor/github.com/fsouza/go-dockerclient/AUTHORS
+++ b/vendor/github.com/fsouza/go-dockerclient/AUTHORS
@@ -4,10 +4,14 @@ Abhishek Chanda
Adam Bell-Hanssen
Adrien Kohlbecker
Aldrin Leal
+Alex Dadgar
+Alfonso Acosta
+André Carvalho
Andreas Jaekle
Andrews Medina
Andrey Sibiryov
Andy Goldstein
+Anirudh Aithal
Antonio Murdaca
Artem Sidorenko
Ben Marini
@@ -22,11 +26,14 @@ Brian Palmer
Bryan Boreham
Burke Libbey
Carlos Diaz-Padron
+Carson A
Cesar Wong
Cezar Sa Espinola
+Changping Chen
Cheah Chu Yeow
cheneydeng
Chris Bednarski
+Christian Stewart
CMGS
Colin Hebert
Craig Jellick
@@ -44,6 +51,7 @@ Ed
Elias G. Schneevoigt
Erez Horev
Eric Anderson
+Ethan Mosbaugh
Ewout Prangsma
Fabio Rehm
Fatih Arslan
@@ -66,12 +74,16 @@ Jeff Mitchell
Jeffrey Hulten
Jen Andre
Jérôme Laurens
+Jim Minter
Johan Euphrosine
John Hughes
+Jorge Marey
Kamil Domanski
Karan Misra
Ken Herner
+Kevin Xu
Kim, Hirokuni
+Kostas Lekkas
Kyle Allan
Liron Levin
Lior Yankovich
@@ -81,6 +93,8 @@ Lucas Clemente
Lucas Weiblen
Lyon Hill
Mantas Matelis
+Marguerite des Trois Maisons
+Mariusz Borsa
Martin Sweeney
Máximo Cuadros Ortiz
Michael Schmatz
@@ -100,15 +114,21 @@ Peter Edge
Peter Jihoon Kim
Phil Lu
Philippe Lafoucrière
+Radek Simko
Rafe Colton
+Raphaël Pinson
Rob Miller
+Robbert Klarenbeek
Robert Williamson
Roman Khlystik
+Russell Haering
Salvador Gironès
Sam Rijs
Sami Wagiaalla
Samuel Archambault
Samuel Karp
+Seth Jennings
+Shane Xie
Silas Sewell
Simon Eskildsen
Simon Menke
@@ -117,15 +137,19 @@ Soulou
Sridhar Ratnakumar
Summer Mousa
Sunjin Lee
-Tarsis Azevedo
+Swaroop Ramachandra
+Tarsis Azevedo
Tim Schindler
Timothy St. Clair
Tobi Knaup
Tom Wilkie
Tonic
ttyh061
+upccup
Victor Marmol
Vincenzo Prignano
+Vlad Alexandru Ionescu
+Weitao Zhou
Wiliam Souza
Ye Yin
Yu, Zou
diff --git a/vendor/github.com/fsouza/go-dockerclient/LICENSE b/vendor/github.com/fsouza/go-dockerclient/LICENSE
index b1cdd4cd2..11c9e2889 100644
--- a/vendor/github.com/fsouza/go-dockerclient/LICENSE
+++ b/vendor/github.com/fsouza/go-dockerclient/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2016, go-dockerclient authors
+Copyright (c) 2013-2016, go-dockerclient authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/fsouza/go-dockerclient/Makefile
index dd8c73b1b..483aa1bb4 100644
--- a/vendor/github.com/fsouza/go-dockerclient/Makefile
+++ b/vendor/github.com/fsouza/go-dockerclient/Makefile
@@ -1,6 +1,5 @@
.PHONY: \
all \
- vendor \
lint \
vet \
fmt \
@@ -8,50 +7,35 @@
pretest \
test \
integration \
- cov \
clean
-PKGS = . ./testing
-
all: test
-vendor:
- @ go get -v github.com/mjibson/party
- party -d external -c -u
-
lint:
@ go get -v github.com/golang/lint/golint
- @for file in $$(git ls-files '*.go' | grep -v 'external/'); do \
- export output="$$(golint $${file} | grep -v 'type name will be used as docker.DockerInfo')"; \
- [ -n "$${output}" ] && echo "$${output}" && export status=1; \
- done; \
- exit $${status:-0}
+ [ -z "$$(golint . | grep -v 'type name will be used as docker.DockerInfo' | grep -v 'context.Context should be the first' | tee /dev/stderr)" ]
vet:
- $(foreach pkg,$(PKGS),go vet $(pkg);)
+ go vet ./...
fmt:
- gofmt -s -w $(PKGS)
+ gofmt -s -w .
fmtcheck:
- @ export output=$$(gofmt -s -d $(PKGS)); \
- [ -n "$${output}" ] && echo "$${output}" && export status=1; \
- exit $${status:-0}
+ [ -z "$$(gofmt -s -d . | tee /dev/stderr)" ]
-pretest: lint vet fmtcheck
+testdeps:
+ go get -d -t ./...
+
+pretest: testdeps lint vet fmtcheck
gotest:
- $(foreach pkg,$(PKGS),go test $(pkg) || exit;)
+ go test $(GO_TEST_FLAGS) ./...
test: pretest gotest
integration:
go test -tags docker_integration -run TestIntegration -v
-cov:
- @ go get -v github.com/axw/gocov/gocov
- @ go get golang.org/x/tools/cmd/cover
- gocov test | gocov report
-
clean:
- $(foreach pkg,$(PKGS),go clean $(pkg) || exit;)
+ go clean ./...
diff --git a/vendor/github.com/fsouza/go-dockerclient/README.markdown b/vendor/github.com/fsouza/go-dockerclient/README.markdown
index b915039f1..c99985aef 100644
--- a/vendor/github.com/fsouza/go-dockerclient/README.markdown
+++ b/vendor/github.com/fsouza/go-dockerclient/README.markdown
@@ -1,10 +1,12 @@
# go-dockerclient
-[![Travis](https://img.shields.io/travis/fsouza/go-dockerclient.svg?style=flat-square)](https://travis-ci.org/fsouza/go-dockerclient)
+[![Travis Build Status](https://travis-ci.org/fsouza/go-dockerclient.svg?branch=master)](https://travis-ci.org/fsouza/go-dockerclient)
+[![AppVeyor Build Status](https://ci.appveyor.com/api/projects/status/4m374pti06ubg2l7?svg=true)](https://ci.appveyor.com/project/fsouza/go-dockerclient)
[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient)
This package presents a client for the Docker remote API. It also provides
support for the extensions in the [Swarm API](https://docs.docker.com/swarm/swarm-api/).
+It currently supports the Docker API up to version 1.23.
This package also provides support for docker's network API, which is a simple
passthrough to the libnetwork remote API. Note that docker's network API is
@@ -13,13 +15,6 @@ DOCKER_EXPERIMENTAL is defined during the docker build process.
For more details, check the [remote API documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/).
-## Vendoring
-
-If you are having issues with Go 1.5 and have `GO15VENDOREXPERIMENT` set with an application that has go-dockerclient vendored,
-please update your vendoring of go-dockerclient :) We recently moved the `vendor` directory to `external` so that go-dockerclient
-is compatible with this configuration. See [338](https://github.com/fsouza/go-dockerclient/issues/338) and [339](https://github.com/fsouza/go-dockerclient/pull/339)
-for details.
-
## Example
```go
@@ -97,7 +92,7 @@ All development commands can be seen in the [Makefile](Makefile).
Commited code must pass:
-* [golint](https://github.com/golang/lint)
+* [golint](https://github.com/golang/lint) (with some exceptions, see the Makefile).
* [go vet](https://godoc.org/golang.org/x/tools/cmd/vet)
* [gofmt](https://golang.org/cmd/gofmt)
* [go test](https://golang.org/cmd/go/#hdr-Test_packages)
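
For readers reviewing this vendor bump, a minimal sketch of the client in use, assuming a local daemon on the default Unix socket; it exercises nothing beyond NewClient and ListContainers, both long-standing parts of the API.

```go
package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// Endpoint form follows the schemes handled in client.go:
	// unix://, tcp://, http(s)://, and (on Windows) npipe://.
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}

	containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID, c.Image, c.Status)
	}
}
```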
diff --git a/vendor/github.com/fsouza/go-dockerclient/appveyor.yml b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml
new file mode 100644
index 000000000..c6cc73e62
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml
@@ -0,0 +1,16 @@
+version: '{build}'
+platform: x64
+clone_depth: 2
+clone_folder: c:\gopath\src\github.com\fsouza\go-dockerclient
+environment:
+ GOPATH: c:\gopath
+ GOVERSION: 1.7.3
+install:
+ - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+ - rmdir c:\go /s /q
+ - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.zip
+ - 7z x go%GOVERSION%.windows-amd64.zip -y -oC:\ > NUL
+build_script:
+ - go get -d -t ./...
+test_script:
+ - go test ./...
diff --git a/vendor/github.com/fsouza/go-dockerclient/auth.go b/vendor/github.com/fsouza/go-dockerclient/auth.go
index 1be277c96..5ba946841 100644
--- a/vendor/github.com/fsouza/go-dockerclient/auth.go
+++ b/vendor/github.com/fsouza/go-dockerclient/auth.go
@@ -11,6 +11,7 @@ import (
"errors"
"fmt"
"io"
+ "io/ioutil"
"os"
"path"
"strings"
@@ -45,23 +46,47 @@ type dockerConfig struct {
Email string `json:"email"`
}
-// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from the
-// ~/.dockercfg file.
-func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) {
- var r io.Reader
- var err error
- p := path.Join(os.Getenv("HOME"), ".docker", "config.json")
- r, err = os.Open(p)
+// NewAuthConfigurationsFromFile returns AuthConfigurations from a path containing JSON
+// in the same format as the .dockercfg file.
+func NewAuthConfigurationsFromFile(path string) (*AuthConfigurations, error) {
+ r, err := os.Open(path)
if err != nil {
- p := path.Join(os.Getenv("HOME"), ".dockercfg")
- r, err = os.Open(p)
- if err != nil {
- return nil, err
- }
+ return nil, err
}
return NewAuthConfigurations(r)
}
+func cfgPaths(dockerConfigEnv string, homeEnv string) []string {
+ var paths []string
+ if dockerConfigEnv != "" {
+ paths = append(paths, path.Join(dockerConfigEnv, "config.json"))
+ }
+ if homeEnv != "" {
+ paths = append(paths, path.Join(homeEnv, ".docker", "config.json"))
+ paths = append(paths, path.Join(homeEnv, ".dockercfg"))
+ }
+ return paths
+}
+
+// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from
+// system config files. The following files are checked in the order listed:
+// - $DOCKER_CONFIG/config.json if DOCKER_CONFIG set in the environment,
+// - $HOME/.docker/config.json
+// - $HOME/.dockercfg
+func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) {
+ err := fmt.Errorf("No docker configuration found")
+ var auths *AuthConfigurations
+
+ pathsToTry := cfgPaths(os.Getenv("DOCKER_CONFIG"), os.Getenv("HOME"))
+ for _, path := range pathsToTry {
+ auths, err = NewAuthConfigurationsFromFile(path)
+ if err == nil {
+ return auths, nil
+ }
+ }
+ return auths, err
+}
+
// NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the
// same format as the .dockercfg file.
func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) {
@@ -122,17 +147,36 @@ func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) {
return c, nil
}
+// AuthStatus returns the authentication status for Docker API versions >= 1.23.
+type AuthStatus struct {
+ Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
+ IdentityToken string `json:"IdentityToken,omitempty" yaml:"IdentityToken,omitempty"`
+}
+
// AuthCheck validates the given credentials. It returns nil if successful.
//
-// See https://goo.gl/m2SleN for more details.
-func (c *Client) AuthCheck(conf *AuthConfiguration) error {
+// For Docker API versions >= 1.23, the AuthStatus struct will be populated, otherwise it will be empty.
+//
+// See https://goo.gl/6nsZkH for more details.
+func (c *Client) AuthCheck(conf *AuthConfiguration) (AuthStatus, error) {
+ var authStatus AuthStatus
if conf == nil {
- return fmt.Errorf("conf is nil")
+ return authStatus, fmt.Errorf("conf is nil")
}
resp, err := c.do("POST", "/auth", doOptions{data: conf})
if err != nil {
- return err
+ return authStatus, err
}
- resp.Body.Close()
- return nil
+ defer resp.Body.Close()
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return authStatus, err
+ }
+ if len(data) == 0 {
+ return authStatus, nil
+ }
+ if err := json.Unmarshal(data, &authStatus); err != nil {
+ return authStatus, err
+ }
+ return authStatus, nil
}
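
A hedged sketch of the new AuthCheck signature in context. It assumes the usual AuthConfigurations.Configs map layout and a daemon on the default socket; error handling is kept minimal.

```go
package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}

	// Load credentials from $DOCKER_CONFIG/config.json, ~/.docker/config.json,
	// or ~/.dockercfg, in that order (the lookup added above).
	auths, err := docker.NewAuthConfigurationsFromDockerCfg()
	if err != nil {
		log.Fatal(err)
	}

	for registry, conf := range auths.Configs {
		// AuthCheck now returns an AuthStatus; on API >= 1.23 it may carry
		// an identity token in addition to the status string.
		status, err := client.AuthCheck(&conf)
		if err != nil {
			log.Printf("auth failed for %s: %v", registry, err)
			continue
		}
		fmt.Println(registry, status.Status)
	}
}
```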
diff --git a/vendor/github.com/fsouza/go-dockerclient/change.go b/vendor/github.com/fsouza/go-dockerclient/change.go
index d133594d4..3f936b223 100644
--- a/vendor/github.com/fsouza/go-dockerclient/change.go
+++ b/vendor/github.com/fsouza/go-dockerclient/change.go
@@ -23,7 +23,7 @@ const (
// Change represents a change in a container.
//
-// See https://goo.gl/9GsTIF for more details.
+// See https://goo.gl/Wo0JJp for more details.
type Change struct {
Path string
Kind ChangeType
diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go
index d893ba684..3532e075d 100644
--- a/vendor/github.com/fsouza/go-dockerclient/client.go
+++ b/vendor/github.com/fsouza/go-dockerclient/client.go
@@ -1,10 +1,10 @@
-// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Copyright 2013 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package docker provides a client for the Docker remote API.
//
-// See https://goo.gl/G3plxW for more details on the remote API.
+// See https://goo.gl/o2v3rk for more details on the remote API.
package docker
import (
@@ -24,18 +24,25 @@ import (
"os"
"path/filepath"
"reflect"
- "runtime"
"strconv"
"strings"
+ "sync/atomic"
"time"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
- "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp"
+ "github.com/docker/docker/opts"
+ "github.com/docker/docker/pkg/homedir"
+ "github.com/docker/docker/pkg/stdcopy"
+ "github.com/hashicorp/go-cleanhttp"
+ "golang.org/x/net/context"
+ "golang.org/x/net/context/ctxhttp"
)
-const userAgent = "go-dockerclient"
+const (
+ userAgent = "go-dockerclient"
+
+ unixProtocol = "unix"
+ namedPipeProtocol = "npipe"
+)
var (
// ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL.
@@ -44,9 +51,12 @@ var (
// ErrConnectionRefused is returned when the client cannot connect to the given endpoint.
ErrConnectionRefused = errors.New("cannot connect to Docker endpoint")
- apiVersion112, _ = NewAPIVersion("1.12")
+ // ErrInactivityTimeout is returned when a streamable call has been inactive for some time.
+ ErrInactivityTimeout = errors.New("inactivity time exceeded timeout")
+ apiVersion112, _ = NewAPIVersion("1.12")
apiVersion119, _ = NewAPIVersion("1.19")
+ apiVersion124, _ = NewAPIVersion("1.24")
)
// APIVersion is an internal representation of a version of the Remote API.
@@ -131,7 +141,7 @@ type Client struct {
SkipServerVersionCheck bool
HTTPClient *http.Client
TLSConfig *tls.Config
- Dialer *net.Dialer
+ Dialer Dialer
endpoint string
endpointURL *url.URL
@@ -139,7 +149,14 @@ type Client struct {
requestedAPIVersion APIVersion
serverAPIVersion APIVersion
expectedAPIVersion APIVersion
- unixHTTPClient *http.Client
+ nativeHTTPClient *http.Client
+}
+
+// Dialer is an interface that allows network connections to be dialed
+// (net.Dialer fulfills this interface) and named pipes (a shim using
+// winio.DialPipe)
+type Dialer interface {
+ Dial(network, address string) (net.Conn, error)
}
// NewClient returns a Client instance ready for communication with the given
@@ -192,14 +209,16 @@ func NewVersionedClient(endpoint string, apiVersionString string) (*Client, erro
return nil, err
}
}
- return &Client{
+ c := &Client{
HTTPClient: cleanhttp.DefaultClient(),
Dialer: &net.Dialer{},
endpoint: endpoint,
endpointURL: u,
eventMonitor: new(eventMonitoringState),
requestedAPIVersion: requestedAPIVersion,
- }, nil
+ }
+ c.initializeNativeClient()
+ return c, nil
}
// NewVersionnedTLSClient has been DEPRECATED, please use NewVersionedTLSClient.
@@ -301,7 +320,7 @@ func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock,
if err != nil {
return nil, err
}
- return &Client{
+ c := &Client{
HTTPClient: &http.Client{Transport: tr},
TLSConfig: tlsConfig,
Dialer: &net.Dialer{},
@@ -309,7 +328,21 @@ func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock,
endpointURL: u,
eventMonitor: new(eventMonitoringState),
requestedAPIVersion: requestedAPIVersion,
- }, nil
+ }
+ c.initializeNativeClient()
+ return c, nil
+}
+
+// SetTimeout takes a timeout and applies it to both the HTTPClient and
+// nativeHTTPClient. It should not be called concurrently with any other Client
+// methods.
+func (c *Client) SetTimeout(t time.Duration) {
+ if c.HTTPClient != nil {
+ c.HTTPClient.Timeout = t
+ }
+ if c.nativeHTTPClient != nil {
+ c.nativeHTTPClient.Timeout = t
+ }
}
func (c *Client) checkAPIVersion() error {
@@ -338,7 +371,7 @@ func (c *Client) Endpoint() string {
// Ping pings the docker server
//
-// See https://goo.gl/kQCfJj for more details.
+// See https://goo.gl/wYfgY1 for more details.
func (c *Client) Ping() error {
path := "/_ping"
resp, err := c.do("GET", path, doOptions{})
@@ -375,6 +408,7 @@ type doOptions struct {
data interface{}
forceJSON bool
headers map[string]string
+ context context.Context
}
func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) {
@@ -395,12 +429,14 @@ func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, e
httpClient := c.HTTPClient
protocol := c.endpointURL.Scheme
var u string
- if protocol == "unix" {
- httpClient = c.unixClient()
- u = c.getFakeUnixURL(path)
- } else {
+ switch protocol {
+ case unixProtocol, namedPipeProtocol:
+ httpClient = c.nativeHTTPClient
+ u = c.getFakeNativeURL(path)
+ default:
u = c.getURL(path)
}
+
req, err := http.NewRequest(method, u, params)
if err != nil {
return nil, err
@@ -415,12 +451,19 @@ func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, e
for k, v := range doOptions.headers {
req.Header.Set(k, v)
}
- resp, err := httpClient.Do(req)
+
+ ctx := doOptions.context
+ if ctx == nil {
+ ctx = context.Background()
+ }
+
+ resp, err := ctxhttp.Do(ctx, httpClient, req)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return nil, ErrConnectionRefused
}
- return nil, err
+
+ return nil, chooseError(ctx, err)
}
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return nil, newError(resp)
@@ -436,8 +479,22 @@ type streamOptions struct {
in io.Reader
stdout io.Writer
stderr io.Writer
- // timeout is the inital connection timeout
+ // timeout is the initial connection timeout
timeout time.Duration
+ // inactivityTimeout is the timeout applied when no data is received;
+ // it is reset every time new data arrives
+ inactivityTimeout time.Duration
+ context context.Context
+}
+
+// if error in context, return that instead of generic http error
+func chooseError(ctx context.Context, err error) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ return err
+ }
}
func (c *Client) stream(method, path string, streamOptions streamOptions) error {
@@ -470,16 +527,29 @@ func (c *Client) stream(method, path string, streamOptions streamOptions) error
if streamOptions.stderr == nil {
streamOptions.stderr = ioutil.Discard
}
- if protocol == "unix" {
- dial, err := c.Dialer.Dial(protocol, address)
+
+ // make a sub-context so that our active cancellation does not affect parent
+ ctx := streamOptions.context
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ subCtx, cancelRequest := context.WithCancel(ctx)
+ defer cancelRequest()
+
+ if protocol == unixProtocol || protocol == namedPipeProtocol {
+ var dial net.Conn
+ dial, err = c.Dialer.Dial(protocol, address)
if err != nil {
return err
}
- defer dial.Close()
+ go func() {
+ <-subCtx.Done()
+ dial.Close()
+ }()
breader := bufio.NewReader(dial)
err = req.Write(dial)
if err != nil {
- return err
+ return chooseError(subCtx, err)
}
// ReadResponse may hang if server does not replay
@@ -495,47 +565,39 @@ func (c *Client) stream(method, path string, streamOptions streamOptions) error
if strings.Contains(err.Error(), "connection refused") {
return ErrConnectionRefused
}
- return err
+
+ return chooseError(subCtx, err)
}
} else {
- if resp, err = c.HTTPClient.Do(req); err != nil {
+ if resp, err = ctxhttp.Do(subCtx, c.HTTPClient, req); err != nil {
if strings.Contains(err.Error(), "connection refused") {
return ErrConnectionRefused
}
- return err
+ return chooseError(subCtx, err)
}
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return newError(resp)
}
- if streamOptions.useJSONDecoder || resp.Header.Get("Content-Type") == "application/json" {
- // if we want to get raw json stream, just copy it back to output
- // without decoding it
- if streamOptions.rawJSONStream {
- _, err = io.Copy(streamOptions.stdout, resp.Body)
- return err
+ var canceled uint32
+ if streamOptions.inactivityTimeout > 0 {
+ ch := handleInactivityTimeout(&streamOptions, cancelRequest, &canceled)
+ defer close(ch)
+ }
+ err = handleStreamResponse(resp, &streamOptions)
+ if err != nil {
+ if atomic.LoadUint32(&canceled) != 0 {
+ return ErrInactivityTimeout
}
- dec := json.NewDecoder(resp.Body)
- for {
- var m jsonMessage
- if err := dec.Decode(&m); err == io.EOF {
- break
- } else if err != nil {
- return err
- }
- if m.Stream != "" {
- fmt.Fprint(streamOptions.stdout, m.Stream)
- } else if m.Progress != "" {
- fmt.Fprintf(streamOptions.stdout, "%s %s\r", m.Status, m.Progress)
- } else if m.Error != "" {
- return errors.New(m.Error)
- }
- if m.Status != "" {
- fmt.Fprintln(streamOptions.stdout, m.Status)
- }
- }
- } else {
+ return chooseError(subCtx, err)
+ }
+ return nil
+}
+
+func handleStreamResponse(resp *http.Response, streamOptions *streamOptions) error {
+ var err error
+ if !streamOptions.useJSONDecoder && resp.Header.Get("Content-Type") != "application/json" {
if streamOptions.setRawTerminal {
_, err = io.Copy(streamOptions.stdout, resp.Body)
} else {
@@ -543,9 +605,74 @@ func (c *Client) stream(method, path string, streamOptions streamOptions) error
}
return err
}
+ // if we want to get raw json stream, just copy it back to output
+ // without decoding it
+ if streamOptions.rawJSONStream {
+ _, err = io.Copy(streamOptions.stdout, resp.Body)
+ return err
+ }
+ dec := json.NewDecoder(resp.Body)
+ for {
+ var m jsonMessage
+ if err := dec.Decode(&m); err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+ if m.Stream != "" {
+ fmt.Fprint(streamOptions.stdout, m.Stream)
+ } else if m.Progress != "" {
+ fmt.Fprintf(streamOptions.stdout, "%s %s\r", m.Status, m.Progress)
+ } else if m.Error != "" {
+ return errors.New(m.Error)
+ }
+ if m.Status != "" {
+ fmt.Fprintln(streamOptions.stdout, m.Status)
+ }
+ }
return nil
}
+type proxyWriter struct {
+ io.Writer
+ calls uint64
+}
+
+func (p *proxyWriter) callCount() uint64 {
+ return atomic.LoadUint64(&p.calls)
+}
+
+func (p *proxyWriter) Write(data []byte) (int, error) {
+ atomic.AddUint64(&p.calls, 1)
+ return p.Writer.Write(data)
+}
+
+func handleInactivityTimeout(options *streamOptions, cancelRequest func(), canceled *uint32) chan<- struct{} {
+ done := make(chan struct{})
+ proxyStdout := &proxyWriter{Writer: options.stdout}
+ proxyStderr := &proxyWriter{Writer: options.stderr}
+ options.stdout = proxyStdout
+ options.stderr = proxyStderr
+ go func() {
+ var lastCallCount uint64
+ for {
+ select {
+ case <-time.After(options.inactivityTimeout):
+ case <-done:
+ return
+ }
+ curCallCount := proxyStdout.callCount() + proxyStderr.callCount()
+ if curCallCount == lastCallCount {
+ atomic.AddUint32(canceled, 1)
+ cancelRequest()
+ return
+ }
+ lastCallCount = curCallCount
+ }
+ }()
+ return done
+}
+
type hijackOptions struct {
success chan struct{}
setRawTerminal bool
@@ -594,13 +721,17 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (Close
req.Header.Set("Upgrade", "tcp")
protocol := c.endpointURL.Scheme
address := c.endpointURL.Path
- if protocol != "unix" {
+ if protocol != unixProtocol && protocol != namedPipeProtocol {
protocol = "tcp"
address = c.endpointURL.Host
}
var dial net.Conn
- if c.TLSConfig != nil && protocol != "unix" {
- dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
+ if c.TLSConfig != nil && protocol != unixProtocol && protocol != namedPipeProtocol {
+ netDialer, ok := c.Dialer.(*net.Dialer)
+ if !ok {
+ return nil, ErrTLSNotSupported
+ }
+ dial, err = tlsDialWithDialer(netDialer, protocol, address, c.TLSConfig)
if err != nil {
return nil, err
}
@@ -611,7 +742,7 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (Close
}
}
- errs := make(chan error)
+ errs := make(chan error, 1)
quit := make(chan struct{})
go func() {
clientconn := httputil.NewClientConn(dial, nil)
@@ -625,7 +756,7 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (Close
defer rwc.Close()
errChanOut := make(chan error, 1)
- errChanIn := make(chan error, 1)
+ errChanIn := make(chan error, 2)
if hijackOptions.stdout == nil && hijackOptions.stderr == nil {
close(errChanOut)
} else {
@@ -675,14 +806,12 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (Close
select {
case errIn = <-errChanIn:
case <-quit:
- return
}
var errOut error
select {
case errOut = <-errChanOut:
case <-quit:
- return
}
if errIn != nil {
@@ -703,7 +832,7 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (Close
func (c *Client) getURL(path string) string {
urlStr := strings.TrimRight(c.endpointURL.String(), "/")
- if c.endpointURL.Scheme == "unix" {
+ if c.endpointURL.Scheme == unixProtocol || c.endpointURL.Scheme == namedPipeProtocol {
urlStr = ""
}
if c.requestedAPIVersion != nil {
@@ -712,9 +841,9 @@ func (c *Client) getURL(path string) string {
return fmt.Sprintf("%s%s", urlStr, path)
}
-// getFakeUnixURL returns the URL needed to make an HTTP request over a UNIX
+// getFakeNativeURL returns the URL needed to make an HTTP request over a UNIX
// domain socket to the given path.
-func (c *Client) getFakeUnixURL(path string) string {
+func (c *Client) getFakeNativeURL(path string) string {
u := *c.endpointURL // Copy.
// Override URL so that net/http will not complain.
@@ -728,21 +857,6 @@ func (c *Client) getFakeUnixURL(path string) string {
return fmt.Sprintf("%s%s", urlStr, path)
}
-func (c *Client) unixClient() *http.Client {
- if c.unixHTTPClient != nil {
- return c.unixHTTPClient
- }
- socketPath := c.endpointURL.Path
- tr := &http.Transport{
- Dial: func(network, addr string) (net.Conn, error) {
- return c.Dialer.Dial("unix", socketPath)
- },
- }
- cleanhttp.SetTransportFinalizer(tr)
- c.unixHTTPClient = &http.Client{Transport: tr}
- return c.unixHTTPClient
-}
-
type jsonMessage struct {
Status string `json:"status,omitempty"`
Progress string `json:"progress,omitempty"`
@@ -845,11 +959,11 @@ func parseEndpoint(endpoint string, tls bool) (*url.URL, error) {
if err != nil {
return nil, ErrInvalidEndpoint
}
- if tls {
+ if tls && u.Scheme != "unix" {
u.Scheme = "https"
}
switch u.Scheme {
- case "unix":
+ case unixProtocol, namedPipeProtocol:
return u, nil
case "http", "https", "tcp":
_, port, err := net.SplitHostPort(u.Host)
@@ -888,10 +1002,7 @@ func getDockerEnv() (*dockerEnv, error) {
dockerHost := os.Getenv("DOCKER_HOST")
var err error
if dockerHost == "" {
- dockerHost, err = DefaultDockerHost()
- if err != nil {
- return nil, err
- }
+ dockerHost = opts.DefaultHost
}
dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != ""
var dockerCertPath string
@@ -915,16 +1026,3 @@ func getDockerEnv() (*dockerEnv, error) {
dockerCertPath: dockerCertPath,
}, nil
}
-
-// DefaultDockerHost returns the default docker socket for the current OS
-func DefaultDockerHost() (string, error) {
- var defaultHost string
- if runtime.GOOS == "windows" {
- // If we do not have a host, default to TCP socket on Windows
- defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
- } else {
- // If we do not have a host, default to unix socket
- defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
- }
- return opts.ValidateHost(defaultHost)
-}
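
Two of the public changes in this file, the Dialer interface and SetTimeout, can be exercised as follows. The loggingDialer type is a made-up example of an alternative Dialer implementation; per the code above, the client only consults the Dialer for raw connections (Unix sockets, named pipes, and hijacked streams), so ordinary TCP requests still go through HTTPClient.

```go
package main

import (
	"log"
	"net"
	"time"

	docker "github.com/fsouza/go-dockerclient"
)

// loggingDialer satisfies the Dialer interface introduced above (any type with
// a Dial(network, address) method does), wrapping net.Dialer for visibility.
type loggingDialer struct {
	d net.Dialer
}

func (l *loggingDialer) Dial(network, address string) (net.Conn, error) {
	log.Printf("dialing %s %s", network, address)
	return l.d.Dial(network, address)
}

func main() {
	client, err := docker.NewClient("tcp://127.0.0.1:2375")
	if err != nil {
		log.Fatal(err)
	}

	// Swap in the custom dialer and cap every request at 30 seconds;
	// SetTimeout applies to both the regular and the native HTTP clients.
	client.Dialer = &loggingDialer{}
	client.SetTimeout(30 * time.Second)

	if err := client.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("daemon is up")
}
```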
diff --git a/vendor/github.com/fsouza/go-dockerclient/client_unix.go b/vendor/github.com/fsouza/go-dockerclient/client_unix.go
new file mode 100644
index 000000000..0480813c0
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/client_unix.go
@@ -0,0 +1,27 @@
+// +build !windows
+// Copyright 2016 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "net"
+ "net/http"
+
+ "github.com/hashicorp/go-cleanhttp"
+)
+
+// initializeNativeClient initializes the native Unix domain socket client on
+// Unix-style operating systems
+func (c *Client) initializeNativeClient() {
+ if c.endpointURL.Scheme != unixProtocol {
+ return
+ }
+ socketPath := c.endpointURL.Path
+ tr := cleanhttp.DefaultTransport()
+ tr.Dial = func(network, addr string) (net.Conn, error) {
+ return c.Dialer.Dial(unixProtocol, socketPath)
+ }
+ c.nativeHTTPClient = &http.Client{Transport: tr}
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/client_windows.go b/vendor/github.com/fsouza/go-dockerclient/client_windows.go
new file mode 100644
index 000000000..9dc7a37e8
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/client_windows.go
@@ -0,0 +1,41 @@
+// +build windows
+// Copyright 2016 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/Microsoft/go-winio"
+ "github.com/hashicorp/go-cleanhttp"
+)
+
+const namedPipeConnectTimeout = 2 * time.Second
+
+type pipeDialer struct {
+ dialFunc func(network, addr string) (net.Conn, error)
+}
+
+func (p pipeDialer) Dial(network, address string) (net.Conn, error) {
+ return p.dialFunc(network, address)
+}
+
+// initializeNativeClient initializes the native Named Pipe client for Windows
+func (c *Client) initializeNativeClient() {
+ if c.endpointURL.Scheme != namedPipeProtocol {
+ return
+ }
+ namedPipePath := c.endpointURL.Path
+ dialFunc := func(network, addr string) (net.Conn, error) {
+ timeout := namedPipeConnectTimeout
+ return winio.DialPipe(namedPipePath, &timeout)
+ }
+ tr := cleanhttp.DefaultTransport()
+ tr.Dial = dialFunc
+ c.Dialer = &pipeDialer{dialFunc}
+ c.nativeHTTPClient = &http.Client{Transport: tr}
+}
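
Taken together, client_unix.go and client_windows.go mean the transport is selected purely from the endpoint scheme. A small cross-platform sketch follows; the npipe path below is the engine's conventional default pipe name, adjust it if yours differs.

```go
package main

import (
	"log"
	"runtime"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// The native transport is picked from the endpoint scheme: client_unix.go
	// handles unix://, client_windows.go handles npipe:// via winio.DialPipe.
	endpoint := "unix:///var/run/docker.sock"
	if runtime.GOOS == "windows" {
		endpoint = "npipe:////./pipe/docker_engine"
	}

	client, err := docker.NewClient(endpoint)
	if err != nil {
		log.Fatal(err)
	}
	if err := client.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("connected via", endpoint)
}
```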
diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go
index fcf115351..cb6ed609f 100644
--- a/vendor/github.com/fsouza/go-dockerclient/container.go
+++ b/vendor/github.com/fsouza/go-dockerclient/container.go
@@ -1,4 +1,4 @@
-// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Copyright 2013 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -15,7 +15,8 @@ import (
"strings"
"time"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/go-units"
+ "github.com/docker/go-units"
+ "golang.org/x/net/context"
)
// ErrContainerAlreadyExists is the error returned by CreateContainer when the
@@ -24,7 +25,7 @@ var ErrContainerAlreadyExists = errors.New("container already exists")
// ListContainersOptions specify parameters to the ListContainers function.
//
-// See https://goo.gl/47a6tO for more details.
+// See https://goo.gl/kaOHGw for more details.
type ListContainersOptions struct {
All bool
Size bool
@@ -32,6 +33,7 @@ type ListContainersOptions struct {
Since string
Before string
Filters map[string][]string
+ Context context.Context
}
// APIPort is a type that represents a port mapping returned by the Docker API
@@ -42,6 +44,17 @@ type APIPort struct {
IP string `json:"IP,omitempty" yaml:"IP,omitempty"`
}
+// APIMount represents a mount point for a container.
+type APIMount struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ Source string `json:"Source,omitempty" yaml:"Source,omitempty"`
+ Destination string `json:"Destination,omitempty" yaml:"Destination,omitempty"`
+ Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"`
+ Mode string `json:"Mode,omitempty" yaml:"Mode,omitempty"`
+ RW bool `json:"RW,omitempty" yaml:"RW,omitempty"`
+ Propogation string `json:"Propogation,omitempty" yaml:"Propogation,omitempty"`
+}
+
// APIContainers represents each container in the list returned by
// ListContainers.
type APIContainers struct {
@@ -49,6 +62,7 @@ type APIContainers struct {
Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
Command string `json:"Command,omitempty" yaml:"Command,omitempty"`
Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"`
+ State string `json:"State,omitempty" yaml:"State,omitempty"`
Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
Ports []APIPort `json:"Ports,omitempty" yaml:"Ports,omitempty"`
SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty"`
@@ -56,6 +70,7 @@ type APIContainers struct {
Names []string `json:"Names,omitempty" yaml:"Names,omitempty"`
Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
Networks NetworkList `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty"`
+ Mounts []APIMount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"`
}
// NetworkList encapsulates a map of networks, as returned by the Docker API in
@@ -66,10 +81,10 @@ type NetworkList struct {
// ListContainers returns a slice of containers matching the given criteria.
//
-// See https://goo.gl/47a6tO for more details.
+// See https://goo.gl/kaOHGw for more details.
func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) {
path := "/containers/json?" + queryString(opts)
- resp, err := c.do("GET", path, doOptions{})
+ resp, err := c.do("GET", path, doOptions{context: opts.Context})
if err != nil {
return nil, err
}
@@ -99,6 +114,21 @@ func (p Port) Proto() string {
return parts[1]
}
+// HealthCheck represents one check of health.
+type HealthCheck struct {
+ Start time.Time `json:"Start,omitempty" yaml:"Start,omitempty"`
+ End time.Time `json:"End,omitempty" yaml:"End,omitempty"`
+ ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
+ Output string `json:"Output,omitempty" yaml:"Output,omitempty"`
+}
+
+// Health represents the health of a container.
+type Health struct {
+ Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
+ FailingStreak int `json:"FailingStreak,omitempty" yaml:"FailingStreak,omitempty"`
+ Log []HealthCheck `json:"Log,omitempty" yaml:"Log,omitempty"`
+}
+
// State represents the state of a container.
type State struct {
Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
@@ -113,6 +143,7 @@ type State struct {
Error string `json:"Error,omitempty" yaml:"Error,omitempty"`
StartedAt time.Time `json:"StartedAt,omitempty" yaml:"StartedAt,omitempty"`
FinishedAt time.Time `json:"FinishedAt,omitempty" yaml:"FinishedAt,omitempty"`
+ Health Health `json:"Health,omitempty" yaml:"Health,omitempty"`
}
// String returns a human-readable description of the state
@@ -264,30 +295,36 @@ type Config struct {
KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty"`
CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
- AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
- AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
- AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"`
ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"`
+ PublishService string `json:"PublishService,omitempty" yaml:"PublishService,omitempty"`
StopSignal string `json:"StopSignal,omitempty" yaml:"StopSignal,omitempty"`
- Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
- OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
- StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"`
Env []string `json:"Env,omitempty" yaml:"Env,omitempty"`
Cmd []string `json:"Cmd" yaml:"Cmd"`
+ Healthcheck *HealthConfig `json:"Healthcheck,omitempty" yaml:"Healthcheck,omitempty"`
DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only
Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"`
- VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"`
MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"`
- NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"`
SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"`
OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"`
Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"`
Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+ AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
+ AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
+ AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
+ ArgsEscaped bool `json:"ArgsEscaped,omitempty" yaml:"ArgsEscaped,omitempty"`
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
+ OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
+ StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"`
+ NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"`
+
+ // This is no longer used and has been kept here for backward
+ // compatibility, please use HostConfig.VolumesFrom.
+ VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
}
// Mount represents a mount point in the container.
@@ -309,15 +346,16 @@ type LogConfig struct {
Config map[string]string `json:"Config,omitempty" yaml:"Config,omitempty"`
}
-// ULimit defines system-wide resource limitations
-// This can help a lot in system administration, e.g. when a user starts too many processes and therefore makes the system unresponsive for other users.
+// ULimit defines system-wide resource limitations. This can help a lot in
+// system administration, e.g. when a user starts too many processes and
+// therefore makes the system unresponsive for other users.
type ULimit struct {
Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
Soft int64 `json:"Soft,omitempty" yaml:"Soft,omitempty"`
Hard int64 `json:"Hard,omitempty" yaml:"Hard,omitempty"`
}
-// SwarmNode containers information about which Swarm node the container is on
+// SwarmNode contains information about which Swarm node the container is on.
type SwarmNode struct {
ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
IP string `json:"IP,omitempty" yaml:"IP,omitempty"`
@@ -328,6 +366,36 @@ type SwarmNode struct {
Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
}
+// GraphDriver contains information about the GraphDriver used by the
+// container.
+type GraphDriver struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ Data map[string]string `json:"Data,omitempty" yaml:"Data,omitempty"`
+}
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature
+//
+// It has been added in version 1.24 of the Docker API, available since
+// Docker 1.12.
+type HealthConfig struct {
+ // Test is the test to perform to check that the container is healthy.
+ // An empty slice means to inherit the default.
+ // The options are:
+ // {} : inherit healthcheck
+ // {"NONE"} : disable healthcheck
+ // {"CMD", args...} : exec arguments directly
+ // {"CMD-SHELL", command} : run command with system's default shell
+ Test []string `json:"Test,omitempty" yaml:"Test,omitempty"`
+
+ // Zero means to inherit. Durations are expressed as integer nanoseconds.
+ Interval time.Duration `json:"Interval,omitempty" yaml:"Interval,omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:"Timeout,omitempty" yaml:"Timeout,omitempty"` // Timeout is the time to wait before considering the check to have hung.
+
+ // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+ // Zero means inherit.
+ Retries int `json:"Retries,omitempty" yaml:"Retries,omitempty"`
+}
+
// Container is the type encompasing everything about a container - its config,
// hostconfig, etc.
type Container struct {
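The new Healthcheck field on Config and the HealthConfig type mirror the HEALTHCHECK support in Docker API 1.24. A hedged sketch of attaching a check at container-creation time; the image, command, and durations below are placeholders:

    package main

    import (
        "time"

        docker "github.com/fsouza/go-dockerclient"
    )

    func createWithHealthcheck(client *docker.Client) (*docker.Container, error) {
        return client.CreateContainer(docker.CreateContainerOptions{
            Name: "web", // placeholder name
            Config: &docker.Config{
                Image: "nginx", // placeholder image
                Healthcheck: &docker.HealthConfig{
                    // CMD-SHELL runs the check with the system's default shell.
                    Test:     []string{"CMD-SHELL", "curl -fs http://localhost/ || exit 1"},
                    Interval: 30 * time.Second,
                    Timeout:  5 * time.Second,
                    Retries:  3,
                },
            },
        })
    }

Once the container is running, the State.Health returned by InspectContainer carries the current Status, FailingStreak, and the recent Log of checks.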
@@ -355,10 +423,11 @@ type Container struct {
Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"`
Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"`
- Volumes map[string]string `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
- VolumesRW map[string]bool `json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty"`
- HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"`
- ExecIDs []string `json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty"`
+ Volumes map[string]string `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
+ VolumesRW map[string]bool `json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty"`
+ HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"`
+ ExecIDs []string `json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty"`
+ GraphDriver *GraphDriver `json:"GraphDriver,omitempty" yaml:"GraphDriver,omitempty"`
RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty"`
@@ -369,23 +438,29 @@ type Container struct {
//
// See https://goo.gl/Y6fXUy for more details.
type UpdateContainerOptions struct {
- BlkioWeight int `json:"BlkioWeight"`
- CPUShares int `json:"CpuShares"`
- CPUPeriod int `json:"CpuPeriod"`
- CPUQuota int `json:"CpuQuota"`
- CpusetCpus string `json:"CpusetCpus"`
- CpusetMems string `json:"CpusetMems"`
- Memory int `json:"Memory"`
- MemorySwap int `json:"MemorySwap"`
- MemoryReservation int `json:"MemoryReservation"`
- KernelMemory int `json:"KernelMemory"`
+ BlkioWeight int `json:"BlkioWeight"`
+ CPUShares int `json:"CpuShares"`
+ CPUPeriod int `json:"CpuPeriod"`
+ CPUQuota int `json:"CpuQuota"`
+ CpusetCpus string `json:"CpusetCpus"`
+ CpusetMems string `json:"CpusetMems"`
+ Memory int `json:"Memory"`
+ MemorySwap int `json:"MemorySwap"`
+ MemoryReservation int `json:"MemoryReservation"`
+ KernelMemory int `json:"KernelMemory"`
+ RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty"`
+ Context context.Context
}
// UpdateContainer updates the container at ID with the options
//
// See https://goo.gl/Y6fXUy for more details.
func (c *Client) UpdateContainer(id string, opts UpdateContainerOptions) error {
- resp, err := c.do("POST", fmt.Sprintf("/containers/"+id+"/update"), doOptions{data: opts, forceJSON: true})
+ resp, err := c.do("POST", fmt.Sprintf("/containers/"+id+"/update"), doOptions{
+ data: opts,
+ forceJSON: true,
+ context: opts.Context,
+ })
if err != nil {
return err
}
@@ -395,20 +470,23 @@ func (c *Client) UpdateContainer(id string, opts UpdateContainerOptions) error {
// RenameContainerOptions specify parameters to the RenameContainer function.
//
-// See https://goo.gl/laSOIy for more details.
+// See https://goo.gl/46inai for more details.
type RenameContainerOptions struct {
// ID of container to rename
ID string `qs:"-"`
// New name
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ Context context.Context
}
// RenameContainer updates and existing containers name
//
-// See https://goo.gl/laSOIy for more details.
+// See https://goo.gl/46inai for more details.
func (c *Client) RenameContainer(opts RenameContainerOptions) error {
- resp, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{})
+ resp, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{
+ context: opts.Context,
+ })
if err != nil {
return err
}
@@ -418,10 +496,22 @@ func (c *Client) RenameContainer(opts RenameContainerOptions) error {
// InspectContainer returns information about a container by its ID.
//
-// See https://goo.gl/RdIq0b for more details.
+// See https://goo.gl/FaI5JT for more details.
func (c *Client) InspectContainer(id string) (*Container, error) {
+ return c.inspectContainer(id, doOptions{})
+}
+
+// InspectContainerWithContext returns information about a container by its ID.
+// The context object can be used to cancel the inspect request.
+//
+// See https://goo.gl/FaI5JT for more details.
+func (c *Client) InspectContainerWithContext(id string, ctx context.Context) (*Container, error) {
+ return c.inspectContainer(id, doOptions{context: ctx})
+}
+
+func (c *Client) inspectContainer(id string, opts doOptions) (*Container, error) {
path := "/containers/" + id + "/json"
- resp, err := c.do("GET", path, doOptions{})
+ resp, err := c.do("GET", path, opts)
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, &NoSuchContainer{ID: id}
@@ -438,7 +528,7 @@ func (c *Client) InspectContainer(id string) (*Container, error) {
// ContainerChanges returns changes in the filesystem of the given container.
//
-// See https://goo.gl/9GsTIF for more details.
+// See https://goo.gl/15KKzh for more details.
func (c *Client) ContainerChanges(id string) ([]Change, error) {
path := "/containers/" + id + "/changes"
resp, err := c.do("GET", path, doOptions{})
@@ -458,17 +548,22 @@ func (c *Client) ContainerChanges(id string) ([]Change, error) {
// CreateContainerOptions specify parameters to the CreateContainer function.
//
-// See https://goo.gl/WxQzrr for more details.
+// See https://goo.gl/tyzwVM for more details.
type CreateContainerOptions struct {
- Name string
- Config *Config `qs:"-"`
- HostConfig *HostConfig `qs:"-"`
+ Name string
+ Config *Config `qs:"-"`
+ HostConfig *HostConfig `qs:"-"`
+ NetworkingConfig *NetworkingConfig `qs:"-"`
+ Context context.Context
}
// CreateContainer creates a new container, returning the container instance,
// or an error in case of failure.
//
-// See https://goo.gl/WxQzrr for more details.
+// The returned container instance contains only the container ID. To get more
+// details about the container after creating it, use InspectContainer.
+//
+// See https://goo.gl/tyzwVM for more details.
func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) {
path := "/containers/create?" + queryString(opts)
resp, err := c.do(
@@ -477,11 +572,14 @@ func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error
doOptions{
data: struct {
*Config
- HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"`
+ HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"`
+ NetworkingConfig *NetworkingConfig `json:"NetworkingConfig,omitempty" yaml:"NetworkingConfig,omitempty"`
}{
opts.Config,
opts.HostConfig,
+ opts.NetworkingConfig,
},
+ context: opts.Context,
},
)
@@ -522,6 +620,8 @@ type KeyValuePair struct {
// - always: the docker daemon will always restart the container
// - on-failure: the docker daemon will restart the container on failures, at
// most MaximumRetryCount times
+// - unless-stopped: the docker daemon will always restart the container except
+// when the user has manually stopped the container
// - no: the docker daemon will not restart the container automatically
type RestartPolicy struct {
Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
@@ -540,6 +640,12 @@ func RestartOnFailure(maxRetry int) RestartPolicy {
return RestartPolicy{Name: "on-failure", MaximumRetryCount: maxRetry}
}
+// RestartUnlessStopped returns a restart policy that tells the Docker daemon to
+// always restart the container except when the user has manually stopped the container.
+func RestartUnlessStopped() RestartPolicy {
+ return RestartPolicy{Name: "unless-stopped"}
+}
+
// NeverRestart returns a restart policy that tells the Docker daemon to never
// restart the container on failures.
func NeverRestart() RestartPolicy {
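RestartUnlessStopped rounds out the existing AlwaysRestart, RestartOnFailure, and NeverRestart helpers. A small sketch of how a HostConfig might pick one; which policy is appropriate depends on the workload:

    package main

    import docker "github.com/fsouza/go-dockerclient"

    func hostConfigWithRestart() *docker.HostConfig {
        return &docker.HostConfig{
            // Restart on daemon restarts and failures, but not after an
            // explicit stop by the user.
            RestartPolicy: docker.RestartUnlessStopped(),
            // Alternatives: docker.AlwaysRestart(), docker.RestartOnFailure(3),
            // docker.NeverRestart().
        }
    }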
@@ -556,8 +662,6 @@ type Device struct {
// BlockWeight represents a relative device weight for an individual device inside
// of a container
-//
-// See https://goo.gl/FSdP0H for more details.
type BlockWeight struct {
Path string `json:"Path,omitempty"`
Weight string `json:"Weight,omitempty"`
@@ -565,8 +669,6 @@ type BlockWeight struct {
// BlockLimit represents a read/write limit in IOPS or Bandwidth for a device
// inside of a container
-//
-// See https://goo.gl/FSdP0H for more details.
type BlockLimit struct {
Path string `json:"Path,omitempty"`
Rate string `json:"Rate,omitempty"`
@@ -581,15 +683,14 @@ type HostConfig struct {
GroupAdd []string `json:"GroupAdd,omitempty" yaml:"GroupAdd,omitempty"`
ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty"`
LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty"`
- Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"`
PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty"`
Links []string `json:"Links,omitempty" yaml:"Links,omitempty"`
- PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty"`
DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.10 and above only
DNSOptions []string `json:"DnsOptions,omitempty" yaml:"DnsOptions,omitempty"`
DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty"`
ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty"`
VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
+ UsernsMode string `json:"UsernsMode,omitempty" yaml:"UsernsMode,omitempty"`
NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty"`
IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty"`
PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty"`
@@ -597,36 +698,83 @@ type HostConfig struct {
RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"`
Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty"`
LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty"`
- ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty"`
SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"`
+ Cgroup string `json:"Cgroup,omitempty" yaml:"Cgroup,omitempty"`
CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty"`
Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
+ MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty"`
+ KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty"`
MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
MemorySwappiness int64 `json:"MemorySwappiness,omitempty" yaml:"MemorySwappiness,omitempty"`
- OOMKillDisable bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable"`
CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
CPUSetCPUs string `json:"CpusetCpus,omitempty" yaml:"CpusetCpus,omitempty"`
CPUSetMEMs string `json:"CpusetMems,omitempty" yaml:"CpusetMems,omitempty"`
CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty"`
CPUPeriod int64 `json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty"`
- BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight"`
- BlkioWeightDevice []BlockWeight `json:"BlkioWeightDevice,omitempty" yaml:"BlkioWeightDevice"`
- BlkioDeviceReadBps []BlockLimit `json:"BlkioDeviceReadBps,omitempty" yaml:"BlkioDeviceReadBps"`
- BlkioDeviceReadIOps []BlockLimit `json:"BlkioDeviceReadIOps,omitempty" yaml:"BlkioDeviceReadIOps"`
- BlkioDeviceWriteBps []BlockLimit `json:"BlkioDeviceWriteBps,omitempty" yaml:"BlkioDeviceWriteBps"`
- BlkioDeviceWriteIOps []BlockLimit `json:"BlkioDeviceWriteIOps,omitempty" yaml:"BlkioDeviceWriteIOps"`
+ BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight,omitempty"`
+ BlkioWeightDevice []BlockWeight `json:"BlkioWeightDevice,omitempty" yaml:"BlkioWeightDevice,omitempty"`
+ BlkioDeviceReadBps []BlockLimit `json:"BlkioDeviceReadBps,omitempty" yaml:"BlkioDeviceReadBps,omitempty"`
+ BlkioDeviceReadIOps []BlockLimit `json:"BlkioDeviceReadIOps,omitempty" yaml:"BlkioDeviceReadIOps,omitempty"`
+ BlkioDeviceWriteBps []BlockLimit `json:"BlkioDeviceWriteBps,omitempty" yaml:"BlkioDeviceWriteBps,omitempty"`
+ BlkioDeviceWriteIOps []BlockLimit `json:"BlkioDeviceWriteIOps,omitempty" yaml:"BlkioDeviceWriteIOps,omitempty"`
Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"`
VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"`
OomScoreAdj int `json:"OomScoreAdj,omitempty" yaml:"OomScoreAdj,omitempty"`
+ PidsLimit int64 `json:"PidsLimit,omitempty" yaml:"PidsLimit,omitempty"`
+ ShmSize int64 `json:"ShmSize,omitempty" yaml:"ShmSize,omitempty"`
+ Tmpfs map[string]string `json:"Tmpfs,omitempty" yaml:"Tmpfs,omitempty"`
+ Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"`
+ PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty"`
+ ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty"`
+ OOMKillDisable bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable,omitempty"`
+ AutoRemove bool `json:"AutoRemove,omitempty" yaml:"AutoRemove,omitempty"`
+ StorageOpt map[string]string `json:"StorageOpt,omitempty" yaml:"StorageOpt,omitempty"`
+ Sysctls map[string]string `json:"Sysctls,omitempty" yaml:"Sysctls,omitempty"`
+}
+
+// NetworkingConfig represents the container's networking configuration for each of its interfaces
+// Carries the networking configs specified in the `docker run` and `docker network connect` commands
+type NetworkingConfig struct {
+ EndpointsConfig map[string]*EndpointConfig `json:"EndpointsConfig" yaml:"EndpointsConfig"` // Endpoint configs for each connecting network
}
// StartContainer starts a container, returning an error in case of failure.
//
-// See https://goo.gl/MrBAJv for more details.
+// Passing the HostConfig to this method has been deprecated in Docker API 1.22
+// (Docker Engine 1.10.x) and totally removed in Docker API 1.24 (Docker Engine
+// 1.12.x). The client will ignore the parameter when communicating with Docker
+// API 1.24 or greater.
+//
+// See https://goo.gl/fbOSZy for more details.
func (c *Client) StartContainer(id string, hostConfig *HostConfig) error {
+ return c.startContainer(id, hostConfig, doOptions{})
+}
+
+// StartContainerWithContext starts a container, returning an error in case of
+// failure. The context can be used to cancel the outstanding start container
+// request.
+//
+// Passing the HostConfig to this method has been deprecated in Docker API 1.22
+// (Docker Engine 1.10.x) and totally removed in Docker API 1.24 (Docker Engine
+// 1.12.x). The client will ignore the parameter when communicating with Docker
+// API 1.24 or greater.
+//
+// See https://goo.gl/fbOSZy for more details.
+func (c *Client) StartContainerWithContext(id string, hostConfig *HostConfig, ctx context.Context) error {
+ return c.startContainer(id, hostConfig, doOptions{context: ctx})
+}
+
+func (c *Client) startContainer(id string, hostConfig *HostConfig, opts doOptions) error {
path := "/containers/" + id + "/start"
- resp, err := c.do("POST", path, doOptions{data: hostConfig, forceJSON: true})
+ if c.serverAPIVersion == nil {
+ c.checkAPIVersion()
+ }
+ if c.serverAPIVersion != nil && c.serverAPIVersion.LessThan(apiVersion124) {
+ opts.data = hostConfig
+ opts.forceJSON = true
+ }
+ resp, err := c.do("POST", path, opts)
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchContainer{ID: id, Err: err}
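The WithContext variants thread a context through doOptions so the HTTP request can be cancelled or deadline-bound. A sketch, assuming the caller already has a container ID and using golang.org/x/net/context as the library itself does; the five-second deadline is arbitrary:

    package main

    import (
        "time"

        docker "github.com/fsouza/go-dockerclient"
        "golang.org/x/net/context"
    )

    func startWithDeadline(client *docker.Client, containerID string) error {
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()
        // A nil HostConfig is fine; on API 1.24+ the parameter is ignored anyway.
        return client.StartContainerWithContext(containerID, nil, ctx)
    }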
@@ -643,10 +791,23 @@ func (c *Client) StartContainer(id string, hostConfig *HostConfig) error {
// StopContainer stops a container, killing it after the given timeout (in
// seconds).
//
-// See https://goo.gl/USqsFt for more details.
+// See https://goo.gl/R9dZcV for more details.
func (c *Client) StopContainer(id string, timeout uint) error {
+ return c.stopContainer(id, timeout, doOptions{})
+}
+
+// StopContainerWithContext stops a container, killing it after the given
+// timeout (in seconds). The context can be used to cancel the stop
+// container request.
+//
+// See https://goo.gl/R9dZcV for more details.
+func (c *Client) StopContainerWithContext(id string, timeout uint, ctx context.Context) error {
+ return c.stopContainer(id, timeout, doOptions{context: ctx})
+}
+
+func (c *Client) stopContainer(id string, timeout uint, opts doOptions) error {
path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout)
- resp, err := c.do("POST", path, doOptions{})
+ resp, err := c.do("POST", path, opts)
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchContainer{ID: id}
@@ -663,7 +824,7 @@ func (c *Client) StopContainer(id string, timeout uint) error {
// RestartContainer stops a container, killing it after the given timeout (in
// seconds), during the stop process.
//
-// See https://goo.gl/QzsDnz for more details.
+// See https://goo.gl/MrAKQ5 for more details.
func (c *Client) RestartContainer(id string, timeout uint) error {
path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout)
resp, err := c.do("POST", path, doOptions{})
@@ -679,7 +840,7 @@ func (c *Client) RestartContainer(id string, timeout uint) error {
// PauseContainer pauses the given container.
//
-// See https://goo.gl/OF7W9X for more details.
+// See https://goo.gl/D1Yaii for more details.
func (c *Client) PauseContainer(id string) error {
path := fmt.Sprintf("/containers/%s/pause", id)
resp, err := c.do("POST", path, doOptions{})
@@ -695,7 +856,7 @@ func (c *Client) PauseContainer(id string) error {
// UnpauseContainer unpauses the given container.
//
-// See https://goo.gl/7dwyPA for more details.
+// See https://goo.gl/sZ2faO for more details.
func (c *Client) UnpauseContainer(id string) error {
path := fmt.Sprintf("/containers/%s/unpause", id)
resp, err := c.do("POST", path, doOptions{})
@@ -712,7 +873,7 @@ func (c *Client) UnpauseContainer(id string) error {
// TopResult represents the list of processes running in a container, as
// returned by /containers//top.
//
-// See https://goo.gl/Rb46aY for more details.
+// See https://goo.gl/FLwpPl for more details.
type TopResult struct {
Titles []string
Processes [][]string
@@ -720,7 +881,7 @@ type TopResult struct {
// TopContainer returns processes running inside a container
//
-// See https://goo.gl/Rb46aY for more details.
+// See https://goo.gl/FLwpPl for more details.
func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) {
var args string
var result TopResult
@@ -736,17 +897,18 @@ func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) {
return result, err
}
defer resp.Body.Close()
- if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
- return result, err
- }
- return result, nil
+ err = json.NewDecoder(resp.Body).Decode(&result)
+ return result, err
}
// Stats represents container statistics, returned by /containers//stats.
//
-// See https://goo.gl/GNmLHb for more details.
+// See https://goo.gl/Dk3Xio for more details.
type Stats struct {
- Read time.Time `json:"read,omitempty" yaml:"read,omitempty"`
+ Read time.Time `json:"read,omitempty" yaml:"read,omitempty"`
+ PidsStats struct {
+ Current uint64 `json:"current,omitempty" yaml:"current,omitempty"`
+ } `json:"pids_stats,omitempty" yaml:"pids_stats,omitempty"`
Network NetworkStats `json:"network,omitempty" yaml:"network,omitempty"`
Networks map[string]NetworkStats `json:"networks,omitempty" yaml:"networks,omitempty"`
MemoryStats struct {
@@ -840,7 +1002,7 @@ type BlkioStatsEntry struct {
// StatsOptions specify parameters to the Stats function.
//
-// See https://goo.gl/GNmLHb for more details.
+// See https://goo.gl/Dk3Xio for more details.
type StatsOptions struct {
ID string
Stats chan<- *Stats
@@ -849,6 +1011,10 @@ type StatsOptions struct {
Done <-chan bool
// Initial connection timeout
Timeout time.Duration
+ // Timeout applied while no data is received; it is reset every time new
+ // data arrives
+ InactivityTimeout time.Duration `qs:"-"`
+ Context context.Context
}
// Stats sends container statistics for the given container to the given channel.
@@ -859,7 +1025,7 @@ type StatsOptions struct {
// will close the given channel. Alternatively, function can be stopped by
// signaling on the Done channel.
//
-// See https://goo.gl/GNmLHb for more details.
+// See https://goo.gl/Dk3Xio for more details.
func (c *Client) Stats(opts StatsOptions) (retErr error) {
errC := make(chan error, 1)
readCloser, writeCloser := io.Pipe()
@@ -883,10 +1049,12 @@ func (c *Client) Stats(opts StatsOptions) (retErr error) {
go func() {
err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{
- rawJSONStream: true,
- useJSONDecoder: true,
- stdout: writeCloser,
- timeout: opts.Timeout,
+ rawJSONStream: true,
+ useJSONDecoder: true,
+ stdout: writeCloser,
+ timeout: opts.Timeout,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
})
if err != nil {
dockerError, ok := err.(*Error)
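Stats keeps streaming decoded samples onto the provided channel until the stream ends or Done is signalled, and the new InactivityTimeout bounds the gap between samples. A hedged sketch of a consumer; the timeout value and the choice of Read as the printed field are illustrative:

    package main

    import (
        "log"
        "time"

        docker "github.com/fsouza/go-dockerclient"
    )

    func watchStats(client *docker.Client, containerID string) {
        statsC := make(chan *docker.Stats)
        done := make(chan bool)
        go func() {
            // Stats blocks until the stream ends, so run it in its own
            // goroutine; it closes statsC when it returns.
            err := client.Stats(docker.StatsOptions{
                ID:                containerID,
                Stats:             statsC,
                Stream:            true,
                Done:              done,
                InactivityTimeout: 10 * time.Second,
            })
            if err != nil {
                log.Println("stats stream ended:", err)
            }
        }()
        for s := range statsC {
            log.Printf("stats sample read at %s", s.Read)
        }
    }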
@@ -930,23 +1098,24 @@ func (c *Client) Stats(opts StatsOptions) (retErr error) {
// KillContainerOptions represents the set of options that can be used in a
// call to KillContainer.
//
-// See https://goo.gl/hkS9i8 for more details.
+// See https://goo.gl/JnTxXZ for more details.
type KillContainerOptions struct {
// The ID of the container.
ID string `qs:"-"`
// The signal to send to the container. When omitted, Docker server
// will assume SIGKILL.
- Signal Signal
+ Signal Signal
+ Context context.Context
}
// KillContainer sends a signal to a container, returning an error in case of
// failure.
//
-// See https://goo.gl/hkS9i8 for more details.
+// See https://goo.gl/JnTxXZ for more details.
func (c *Client) KillContainer(opts KillContainerOptions) error {
path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts)
- resp, err := c.do("POST", path, doOptions{})
+ resp, err := c.do("POST", path, doOptions{context: opts.Context})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchContainer{ID: opts.ID}
@@ -959,7 +1128,7 @@ func (c *Client) KillContainer(opts KillContainerOptions) error {
// RemoveContainerOptions encapsulates options to remove a container.
//
-// See https://goo.gl/RQyX62 for more details.
+// See https://goo.gl/hL5IPC for more details.
type RemoveContainerOptions struct {
// The ID of the container.
ID string `qs:"-"`
@@ -970,15 +1139,16 @@ type RemoveContainerOptions struct {
// A flag that indicates whether Docker should remove the container
// even if it is currently running.
- Force bool
+ Force bool
+ Context context.Context
}
// RemoveContainer removes a container, returning an error in case of failure.
//
-// See https://goo.gl/RQyX62 for more details.
+// See https://goo.gl/hL5IPC for more details.
func (c *Client) RemoveContainer(opts RemoveContainerOptions) error {
path := "/containers/" + opts.ID + "?" + queryString(opts)
- resp, err := c.do("DELETE", path, doOptions{})
+ resp, err := c.do("DELETE", path, doOptions{context: opts.Context})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchContainer{ID: opts.ID}
@@ -992,64 +1162,80 @@ func (c *Client) RemoveContainer(opts RemoveContainerOptions) error {
// UploadToContainerOptions is the set of options that can be used when
// uploading an archive into a container.
//
-// See https://goo.gl/Ss97HW for more details.
+// See https://goo.gl/g25o7u for more details.
type UploadToContainerOptions struct {
InputStream io.Reader `json:"-" qs:"-"`
Path string `qs:"path"`
NoOverwriteDirNonDir bool `qs:"noOverwriteDirNonDir"`
+ Context context.Context
}
// UploadToContainer uploads a tar archive to be extracted to a path in the
// filesystem of the container.
//
-// See https://goo.gl/Ss97HW for more details.
+// See https://goo.gl/g25o7u for more details.
func (c *Client) UploadToContainer(id string, opts UploadToContainerOptions) error {
url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
return c.stream("PUT", url, streamOptions{
- in: opts.InputStream,
+ in: opts.InputStream,
+ context: opts.Context,
})
}
// DownloadFromContainerOptions is the set of options that can be used when
// downloading resources from a container.
//
-// See https://goo.gl/KnZJDX for more details.
+// See https://goo.gl/W49jxK for more details.
type DownloadFromContainerOptions struct {
- OutputStream io.Writer `json:"-" qs:"-"`
- Path string `qs:"path"`
+ OutputStream io.Writer `json:"-" qs:"-"`
+ Path string `qs:"path"`
+ InactivityTimeout time.Duration `qs:"-"`
+ Context context.Context
}
// DownloadFromContainer downloads a tar archive of files or folders in a container.
//
-// See https://goo.gl/KnZJDX for more details.
+// See https://goo.gl/W49jxK for more details.
func (c *Client) DownloadFromContainer(id string, opts DownloadFromContainerOptions) error {
url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
return c.stream("GET", url, streamOptions{
- setRawTerminal: true,
- stdout: opts.OutputStream,
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
})
}
// CopyFromContainerOptions has been DEPRECATED, please use DownloadFromContainerOptions along with DownloadFromContainer.
//
-// See https://goo.gl/R2jevW for more details.
+// See https://goo.gl/nWk2YQ for more details.
type CopyFromContainerOptions struct {
OutputStream io.Writer `json:"-"`
Container string `json:"-"`
Resource string
+ Context context.Context `json:"-"`
}
// CopyFromContainer has been DEPRECATED, please use DownloadFromContainerOptions along with DownloadFromContainer.
//
-// See https://goo.gl/R2jevW for more details.
+// See https://goo.gl/nWk2YQ for more details.
func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error {
if opts.Container == "" {
return &NoSuchContainer{ID: opts.Container}
}
+ if c.serverAPIVersion == nil {
+ c.checkAPIVersion()
+ }
+ if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion124) {
+ return errors.New("go-dockerclient: CopyFromContainer is no longer available in Docker >= 1.12, use DownloadFromContainer instead")
+ }
url := fmt.Sprintf("/containers/%s/copy", opts.Container)
- resp, err := c.do("POST", url, doOptions{data: opts})
+ resp, err := c.do("POST", url, doOptions{
+ data: opts,
+ context: opts.Context,
+ })
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchContainer{ID: opts.Container}
@@ -1064,7 +1250,7 @@ func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error {
// WaitContainer blocks until the given container stops, return the exit code
// of the container status.
//
-// See https://goo.gl/Gc1rge for more details.
+// See https://goo.gl/4AGweZ for more details.
func (c *Client) WaitContainer(id string) (int, error) {
resp, err := c.do("POST", "/containers/"+id+"/wait", doOptions{})
if err != nil {
@@ -1083,7 +1269,7 @@ func (c *Client) WaitContainer(id string) (int, error) {
// CommitContainerOptions aggregates parameters to the CommitContainer method.
//
-// See https://goo.gl/mqfoCw for more details.
+// See https://goo.gl/CzIguf for more details.
type CommitContainerOptions struct {
Container string
Repository string `qs:"repo"`
@@ -1091,14 +1277,18 @@ type CommitContainerOptions struct {
Message string `qs:"comment"`
Author string
Run *Config `qs:"-"`
+ Context context.Context
}
// CommitContainer creates a new image from a container's changes.
//
-// See https://goo.gl/mqfoCw for more details.
+// See https://goo.gl/CzIguf for more details.
func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) {
path := "/commit?" + queryString(opts)
- resp, err := c.do("POST", path, doOptions{data: opts.Run})
+ resp, err := c.do("POST", path, doOptions{
+ data: opts.Run,
+ context: opts.Context,
+ })
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, &NoSuchContainer{ID: opts.Container}
@@ -1116,13 +1306,23 @@ func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) {
// AttachToContainerOptions is the set of options that can be used when
// attaching to a container.
//
-// See https://goo.gl/NKpkFk for more details.
+// See https://goo.gl/JF10Zk for more details.
type AttachToContainerOptions struct {
Container string `qs:"-"`
InputStream io.Reader `qs:"-"`
OutputStream io.Writer `qs:"-"`
ErrorStream io.Writer `qs:"-"`
+ // If set, after a successful connect, a sentinel will be sent and then the
+ // client will block on receive before continuing.
+ //
+ // It must be an unbuffered channel. Using a buffered channel can lead
+ // to unexpected behavior.
+ Success chan struct{}
+
+ // Use raw terminal? Usually true when the container contains a TTY.
+ RawTerminal bool `qs:"-"`
+
// Get container logs, sending it to OutputStream.
Logs bool
@@ -1137,21 +1337,11 @@ type AttachToContainerOptions struct {
// Attach to stderr, and use ErrorStream.
Stderr bool
-
- // If set, after a successful connect, a sentinel will be sent and then the
- // client will block on receive before continuing.
- //
- // It must be an unbuffered channel. Using a buffered channel can lead
- // to unexpected behavior.
- Success chan struct{}
-
- // Use raw terminal? Usually true when the container contains a TTY.
- RawTerminal bool `qs:"-"`
}
// AttachToContainer attaches to a container, using the given options.
//
-// See https://goo.gl/NKpkFk for more details.
+// See https://goo.gl/JF10Zk for more details.
func (c *Client) AttachToContainer(opts AttachToContainerOptions) error {
cw, err := c.AttachToContainerNonBlocking(opts)
if err != nil {
@@ -1181,17 +1371,20 @@ func (c *Client) AttachToContainerNonBlocking(opts AttachToContainerOptions) (Cl
// LogsOptions represents the set of options used when getting logs from a
// container.
//
-// See https://goo.gl/yl8PGm for more details.
+// See https://goo.gl/krK0ZH for more details.
type LogsOptions struct {
- Container string `qs:"-"`
- OutputStream io.Writer `qs:"-"`
- ErrorStream io.Writer `qs:"-"`
- Follow bool
- Stdout bool
- Stderr bool
- Since int64
- Timestamps bool
- Tail string
+ Context context.Context
+ Container string `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+ Tail string
+
+ Since int64
+ Follow bool
+ Stdout bool
+ Stderr bool
+ Timestamps bool
// Use raw terminal? Usually true when the container contains a TTY.
RawTerminal bool `qs:"-"`
@@ -1199,7 +1392,7 @@ type LogsOptions struct {
// Logs gets stdout and stderr logs from the specified container.
//
-// See https://goo.gl/yl8PGm for more details.
+// See https://goo.gl/krK0ZH for more details.
func (c *Client) Logs(opts LogsOptions) error {
if opts.Container == "" {
return &NoSuchContainer{ID: opts.Container}
@@ -1209,15 +1402,17 @@ func (c *Client) Logs(opts LogsOptions) error {
}
path := "/containers/" + opts.Container + "/logs?" + queryString(opts)
return c.stream("GET", path, streamOptions{
- setRawTerminal: opts.RawTerminal,
- stdout: opts.OutputStream,
- stderr: opts.ErrorStream,
+ setRawTerminal: opts.RawTerminal,
+ stdout: opts.OutputStream,
+ stderr: opts.ErrorStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
})
}
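Logs now honours an InactivityTimeout and a Context alongside the usual stream flags. A minimal sketch of following a container's output to the local stdout/stderr; the container ID and timeout are placeholders:

    package main

    import (
        "os"
        "time"

        docker "github.com/fsouza/go-dockerclient"
        "golang.org/x/net/context"
    )

    func followLogs(client *docker.Client, containerID string) error {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()
        return client.Logs(docker.LogsOptions{
            Context:           ctx,
            Container:         containerID,
            OutputStream:      os.Stdout,
            ErrorStream:       os.Stderr,
            Follow:            true,
            Stdout:            true,
            Stderr:            true,
            Timestamps:        true,
            InactivityTimeout: 30 * time.Second,
        })
    }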
// ResizeContainerTTY resizes the terminal to the given height and width.
//
-// See https://goo.gl/xERhCc for more details.
+// See https://goo.gl/FImjeq for more details.
func (c *Client) ResizeContainerTTY(id string, height, width int) error {
params := make(url.Values)
params.Set("h", strconv.Itoa(height))
@@ -1233,24 +1428,28 @@ func (c *Client) ResizeContainerTTY(id string, height, width int) error {
// ExportContainerOptions is the set of parameters to the ExportContainer
// method.
//
-// See https://goo.gl/dOkTyk for more details.
+// See https://goo.gl/yGJCIh for more details.
type ExportContainerOptions struct {
- ID string
- OutputStream io.Writer
+ ID string
+ OutputStream io.Writer
+ InactivityTimeout time.Duration `qs:"-"`
+ Context context.Context
}
// ExportContainer export the contents of container id as tar archive
// and prints the exported contents to stdout.
//
-// See https://goo.gl/dOkTyk for more details.
+// See https://goo.gl/yGJCIh for more details.
func (c *Client) ExportContainer(opts ExportContainerOptions) error {
if opts.ID == "" {
return &NoSuchContainer{ID: opts.ID}
}
url := fmt.Sprintf("/containers/%s/export", opts.ID)
return c.stream("GET", url, streamOptions{
- setRawTerminal: true,
- stdout: opts.OutputStream,
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
})
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/event.go b/vendor/github.com/fsouza/go-dockerclient/event.go
index 83b5cf52d..007d2b22b 100644
--- a/vendor/github.com/fsouza/go-dockerclient/event.go
+++ b/vendor/github.com/fsouza/go-dockerclient/event.go
@@ -1,4 +1,4 @@
-// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Copyright 2014 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -53,10 +53,12 @@ type APIActor struct {
}
type eventMonitoringState struct {
+ // `sync/atomic` expects the first word in an allocated struct to be 64-bit
+ // aligned on both ARM and x86-32. See https://goo.gl/zW7dgq for more details.
+ lastSeen int64
sync.RWMutex
sync.WaitGroup
enabled bool
- lastSeen *int64
C chan *APIEvents
errC chan error
listeners []chan<- *APIEvents
@@ -76,6 +78,10 @@ var (
// exists.
ErrListenerAlreadyExists = errors.New("listener already exists for docker events")
+ // ErrTLSNotSupported is the error returned when the client does not support
+ // TLS (this applies to the Windows named pipe client).
+ ErrTLSNotSupported = errors.New("tls not supported by this client")
+
// EOFEvent is sent when the event listener receives an EOF error.
EOFEvent = &APIEvents{
Type: "EOF",
@@ -94,11 +100,7 @@ func (c *Client) AddEventListener(listener chan<- *APIEvents) error {
return err
}
}
- err = c.eventMonitor.addListener(listener)
- if err != nil {
- return err
- }
- return nil
+ return c.eventMonitor.addListener(listener)
}
// RemoveEventListener removes a listener from the monitor.
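The listener bookkeeping above is what AddEventListener and RemoveEventListener drive. A sketch of a consumer reading from the event channel; the log format is arbitrary:

    package main

    import (
        "log"

        docker "github.com/fsouza/go-dockerclient"
    )

    func watchEvents(client *docker.Client) error {
        events := make(chan *docker.APIEvents)
        if err := client.AddEventListener(events); err != nil {
            return err
        }
        defer client.RemoveEventListener(events)
        for event := range events {
            // EOFEvent is delivered when the underlying stream closes.
            if event == docker.EOFEvent {
                break
            }
            log.Printf("%s %s %s", event.Type, event.Action, event.ID)
        }
        return nil
    }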
@@ -107,7 +109,7 @@ func (c *Client) RemoveEventListener(listener chan *APIEvents) error {
if err != nil {
return err
}
- if len(c.eventMonitor.listeners) == 0 {
+ if c.eventMonitor.listernersCount() == 0 {
c.eventMonitor.disableEventMonitoring()
}
return nil
@@ -148,6 +150,12 @@ func (eventState *eventMonitoringState) closeListeners() {
eventState.listeners = nil
}
+func (eventState *eventMonitoringState) listernersCount() int {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ return len(eventState.listeners)
+}
+
func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool {
for _, b := range *list {
if b == a {
@@ -162,8 +170,7 @@ func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error {
defer eventState.Unlock()
if !eventState.enabled {
eventState.enabled = true
- var lastSeenDefault = int64(0)
- eventState.lastSeen = &lastSeenDefault
+ atomic.StoreInt64(&eventState.lastSeen, 0)
eventState.C = make(chan *APIEvents, 100)
eventState.errC = make(chan error, 1)
go eventState.monitorEvents(c)
@@ -226,11 +233,19 @@ func (eventState *eventMonitoringState) monitorEvents(c *Client) {
func (eventState *eventMonitoringState) connectWithRetry(c *Client) error {
var retries int
- var err error
- for err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC); err != nil && retries < maxMonitorConnRetries; retries++ {
+ eventState.RLock()
+ eventChan := eventState.C
+ errChan := eventState.errC
+ eventState.RUnlock()
+ err := c.eventHijack(atomic.LoadInt64(&eventState.lastSeen), eventChan, errChan)
+ for ; err != nil && retries < maxMonitorConnRetries; retries++ {
waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries)))
time.Sleep(time.Duration(waitTime) * time.Millisecond)
- err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC)
+ eventState.RLock()
+ eventChan = eventState.C
+ errChan = eventState.errC
+ eventState.RUnlock()
+ err = c.eventHijack(atomic.LoadInt64(&eventState.lastSeen), eventChan, errChan)
}
return err
}
@@ -267,8 +282,8 @@ func (eventState *eventMonitoringState) sendEvent(event *APIEvents) {
func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) {
eventState.Lock()
defer eventState.Unlock()
- if atomic.LoadInt64(eventState.lastSeen) < e.Time {
- atomic.StoreInt64(eventState.lastSeen, e.Time)
+ if atomic.LoadInt64(&eventState.lastSeen) < e.Time {
+ atomic.StoreInt64(&eventState.lastSeen, e.Time)
}
}
@@ -279,7 +294,7 @@ func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan
}
protocol := c.endpointURL.Scheme
address := c.endpointURL.Path
- if protocol != "unix" {
+ if protocol != "unix" && protocol != "npipe" {
protocol = "tcp"
address = c.endpointURL.Host
}
@@ -288,7 +303,11 @@ func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan
if c.TLSConfig == nil {
dial, err = c.Dialer.Dial(protocol, address)
} else {
- dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
+ netDialer, ok := c.Dialer.(*net.Dialer)
+ if !ok {
+ return ErrTLSNotSupported
+ }
+ dial, err = tlsDialWithDialer(netDialer, protocol, address, c.TLSConfig)
}
if err != nil {
return err
@@ -310,10 +329,12 @@ func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan
var event APIEvents
if err = decoder.Decode(&event); err != nil {
if err == io.EOF || err == io.ErrUnexpectedEOF {
- if c.eventMonitor.isEnabled() {
+ c.eventMonitor.RLock()
+ if c.eventMonitor.enabled && c.eventMonitor.C == eventChan {
// Signal that we're exiting.
eventChan <- EOFEvent
}
+ c.eventMonitor.RUnlock()
break
}
errChan <- err
@@ -321,7 +342,7 @@ func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan
if event.Time == 0 {
continue
}
- if !c.eventMonitor.isEnabled() {
+ if !c.eventMonitor.isEnabled() || c.eventMonitor.C != eventChan {
return
}
transformEvent(&event)
diff --git a/vendor/github.com/fsouza/go-dockerclient/exec.go b/vendor/github.com/fsouza/go-dockerclient/exec.go
index 1a16da9d6..cf1385a0d 100644
--- a/vendor/github.com/fsouza/go-dockerclient/exec.go
+++ b/vendor/github.com/fsouza/go-dockerclient/exec.go
@@ -1,4 +1,4 @@
-// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Copyright 2014 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -11,6 +11,8 @@ import (
"net/http"
"net/url"
"strconv"
+
+ "golang.org/x/net/context"
)
// Exec is the type representing a `docker exec` instance and containing the
@@ -21,24 +23,26 @@ type Exec struct {
// CreateExecOptions specify parameters to the CreateExecContainer function.
//
-// See https://goo.gl/1KSIb7 for more details
+// See https://goo.gl/60TeBP for more details
type CreateExecOptions struct {
- AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
- AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
- AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
- Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
- Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"`
- Container string `json:"Container,omitempty" yaml:"Container,omitempty"`
- User string `json:"User,omitempty" yaml:"User,omitempty"`
+ AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
+ AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
+ AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
+ Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"`
+ Container string `json:"Container,omitempty" yaml:"Container,omitempty"`
+ User string `json:"User,omitempty" yaml:"User,omitempty"`
+ Context context.Context `json:"-"`
+ Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"`
}
// CreateExec sets up an exec instance in a running container `id`, returning the exec
// instance, or an error in case of failure.
//
-// See https://goo.gl/1KSIb7 for more details
+// See https://goo.gl/60TeBP for more details
func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) {
path := fmt.Sprintf("/containers/%s/exec", opts.Container)
- resp, err := c.do("POST", path, doOptions{data: opts})
+ resp, err := c.do("POST", path, doOptions{data: opts, context: opts.Context})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, &NoSuchContainer{ID: opts.Container}
@@ -56,16 +60,15 @@ func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) {
// StartExecOptions specify parameters to the StartExecContainer function.
//
-// See https://goo.gl/iQCnto for more details
+// See https://goo.gl/1EeDWi for more details
type StartExecOptions struct {
- Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty"`
-
- Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
-
InputStream io.Reader `qs:"-"`
OutputStream io.Writer `qs:"-"`
ErrorStream io.Writer `qs:"-"`
+ Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty"`
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
+
// Use raw terminal? Usually true when the container contains a TTY.
RawTerminal bool `qs:"-"`
@@ -75,13 +78,15 @@ type StartExecOptions struct {
// It must be an unbuffered channel. Using a buffered channel can lead
// to unexpected behavior.
Success chan struct{} `json:"-"`
+
+ Context context.Context `json:"-"`
}
// StartExec starts a previously set up exec instance id. If opts.Detach is
// true, it returns after starting the exec command. Otherwise, it sets up an
// interactive session with the exec command.
//
-// See https://goo.gl/iQCnto for more details
+// See https://goo.gl/1EeDWi for more details
func (c *Client) StartExec(id string, opts StartExecOptions) error {
cw, err := c.StartExecNonBlocking(id, opts)
if err != nil {
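CreateExec and StartExec now accept a Context (and CreateExec a Privileged flag). A hedged sketch of running a one-off command in a container and collecting its output; the command is a placeholder:

    package main

    import (
        "bytes"
        "fmt"

        docker "github.com/fsouza/go-dockerclient"
    )

    func runInContainer(client *docker.Client, containerID string) (string, error) {
        exec, err := client.CreateExec(docker.CreateExecOptions{
            Container:    containerID,
            Cmd:          []string{"uname", "-a"},
            AttachStdout: true,
            AttachStderr: true,
        })
        if err != nil {
            return "", err
        }
        var out bytes.Buffer
        err = client.StartExec(exec.ID, docker.StartExecOptions{
            OutputStream: &out,
            ErrorStream:  &out,
        })
        if err != nil {
            return "", err
        }
        // InspectExec reports the exit code once the command has finished.
        inspect, err := client.InspectExec(exec.ID)
        if err != nil {
            return "", err
        }
        if inspect.ExitCode != 0 {
            return out.String(), fmt.Errorf("command exited with code %d", inspect.ExitCode)
        }
        return out.String(), nil
    }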
@@ -97,7 +102,7 @@ func (c *Client) StartExec(id string, opts StartExecOptions) error {
// true, it returns after starting the exec command. Otherwise, it sets up an
// interactive session with the exec command.
//
-// See https://goo.gl/iQCnto for more details
+// See https://goo.gl/1EeDWi for more details
func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWaiter, error) {
if id == "" {
return nil, &NoSuchExec{ID: id}
@@ -106,7 +111,7 @@ func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWa
path := fmt.Sprintf("/exec/%s/start", id)
if opts.Detach {
- resp, err := c.do("POST", path, doOptions{data: opts})
+ resp, err := c.do("POST", path, doOptions{data: opts, context: opts.Context})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return nil, &NoSuchExec{ID: id}
@@ -131,7 +136,7 @@ func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWa
// is valid only if Tty was specified as part of creating and starting the exec
// command.
//
-// See https://goo.gl/e1JpsA for more details
+// See https://goo.gl/Mo5bxx for more details
func (c *Client) ResizeExecTTY(id string, height, width int) error {
params := make(url.Values)
params.Set("h", strconv.Itoa(height))
@@ -149,8 +154,8 @@ func (c *Client) ResizeExecTTY(id string, height, width int) error {
// ExecProcessConfig is a type describing the command associated to a Exec
// instance. It's used in the ExecInspect type.
type ExecProcessConfig struct {
- Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"`
User string `json:"user,omitempty" yaml:"user,omitempty"`
+ Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"`
Tty bool `json:"tty,omitempty" yaml:"tty,omitempty"`
EntryPoint string `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"`
Arguments []string `json:"arguments,omitempty" yaml:"arguments,omitempty"`
@@ -160,11 +165,11 @@ type ExecProcessConfig struct {
// exit code if the command has finished running. It's returned by a api
// call to /exec/(id)/json
//
-// See https://goo.gl/gPtX9R for more details
+// See https://goo.gl/ctMUiW for more details
type ExecInspect struct {
ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
- Running bool `json:"Running,omitempty" yaml:"Running,omitempty"`
ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
+ Running bool `json:"Running,omitempty" yaml:"Running,omitempty"`
OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
OpenStderr bool `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty"`
OpenStdout bool `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty"`
@@ -174,7 +179,7 @@ type ExecInspect struct {
// InspectExec returns low-level information about the exec command id.
//
-// See https://goo.gl/gPtX9R for more details
+// See https://goo.gl/ctMUiW for more details
func (c *Client) InspectExec(id string) (*ExecInspect, error) {
path := fmt.Sprintf("/exec/%s/json", id)
resp, err := c.do("GET", path, doOptions{})
diff --git a/vendor/github.com/fsouza/go-dockerclient/image.go b/vendor/github.com/fsouza/go-dockerclient/image.go
index ca4506354..bc6d990fb 100644
--- a/vendor/github.com/fsouza/go-dockerclient/image.go
+++ b/vendor/github.com/fsouza/go-dockerclient/image.go
@@ -1,4 +1,4 @@
-// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Copyright 2013 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -15,6 +15,8 @@ import (
"net/url"
"os"
"time"
+
+ "golang.org/x/net/context"
)
// APIImages represent an image returned in the ListImages call.
@@ -29,6 +31,12 @@ type APIImages struct {
Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
}
+// RootFS represents the underlying layers used by an image
+type RootFS struct {
+ Type string `json:"Type,omitempty" yaml:"Type,omitempty"`
+ Layers []string `json:"Layers,omitempty" yaml:"Layers,omitempty"`
+}
+
// Image is the type representing a docker image and its various properties
type Image struct {
ID string `json:"Id" yaml:"Id"`
@@ -45,6 +53,7 @@ type Image struct {
Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"`
VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"`
RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty"`
+ RootFS *RootFS `json:"RootFS,omitempty" yaml:"RootFS,omitempty"`
}
// ImagePre012 serves the same purpose as the Image type except that it is for
@@ -86,20 +95,21 @@ var (
// ListImagesOptions specify parameters to the ListImages function.
//
-// See https://goo.gl/xBe1u3 for more details.
+// See https://goo.gl/BVzauZ for more details.
type ListImagesOptions struct {
- All bool
Filters map[string][]string
+ All bool
Digests bool
Filter string
+ Context context.Context
}
// ListImages returns the list of available images in the server.
//
-// See https://goo.gl/xBe1u3 for more details.
+// See https://goo.gl/BVzauZ for more details.
func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) {
path := "/images/json?" + queryString(opts)
- resp, err := c.do("GET", path, doOptions{})
+ resp, err := c.do("GET", path, doOptions{context: opts.Context})
if err != nil {
return nil, err
}
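ListImagesOptions now carries a golang.org/x/net/context Context that is threaded into the HTTP request. A minimal sketch of how a caller could use it, assuming the vendored go-dockerclient API shown in this diff and a daemon on the default Unix socket (both are assumptions of this example):

```go
package main

import (
	"fmt"
	"log"
	"time"

	docker "github.com/fsouza/go-dockerclient"
	"golang.org/x/net/context"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	// Cancel the API call if the daemon takes longer than five seconds to answer.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	images, err := client.ListImages(docker.ListImagesOptions{All: true, Context: ctx})
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range images {
		fmt.Println(img.ID, img.RepoTags)
	}
}
```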
@@ -123,7 +133,7 @@ type ImageHistory struct {
// ImageHistory returns the history of the image by its name or ID.
//
-// See https://goo.gl/8bnTId for more details.
+// See https://goo.gl/fYtxQa for more details.
func (c *Client) ImageHistory(name string) ([]ImageHistory, error) {
resp, err := c.do("GET", "/images/"+name+"/history", doOptions{})
if err != nil {
@@ -142,7 +152,7 @@ func (c *Client) ImageHistory(name string) ([]ImageHistory, error) {
// RemoveImage removes an image by its name or ID.
//
-// See https://goo.gl/V3ZWnK for more details.
+// See https://goo.gl/Vd2Pck for more details.
func (c *Client) RemoveImage(name string) error {
resp, err := c.do("DELETE", "/images/"+name, doOptions{})
if err != nil {
@@ -158,19 +168,20 @@ func (c *Client) RemoveImage(name string) error {
// RemoveImageOptions present the set of options available for removing an image
// from a registry.
//
-// See https://goo.gl/V3ZWnK for more details.
+// See https://goo.gl/Vd2Pck for more details.
type RemoveImageOptions struct {
Force bool `qs:"force"`
NoPrune bool `qs:"noprune"`
+ Context context.Context
}
// RemoveImageExtended removes an image by its name or ID.
// Extra params can be passed, see RemoveImageOptions
//
-// See https://goo.gl/V3ZWnK for more details.
+// See https://goo.gl/Vd2Pck for more details.
func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error {
uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts))
- resp, err := c.do("DELETE", uri, doOptions{})
+ resp, err := c.do("DELETE", uri, doOptions{context: opts.Context})
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return ErrNoSuchImage
@@ -183,7 +194,7 @@ func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error
// InspectImage returns an image by its name or ID.
//
-// See https://goo.gl/jHPcg6 for more details.
+// See https://goo.gl/ncLTG8 for more details.
func (c *Client) InspectImage(name string) (*Image, error) {
resp, err := c.do("GET", "/images/"+name+"/json", doOptions{})
if err != nil {
@@ -225,7 +236,7 @@ func (c *Client) InspectImage(name string) (*Image, error) {
// PushImageOptions represents options to use in the PushImage method.
//
-// See https://goo.gl/zPtZaT for more details.
+// See https://goo.gl/BZemGg for more details.
type PushImageOptions struct {
// Name of the image
Name string
@@ -236,8 +247,11 @@ type PushImageOptions struct {
// Registry server to push the image
Registry string
- OutputStream io.Writer `qs:"-"`
- RawJSONStream bool `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+
+ Context context.Context
}
// PushImage pushes an image to a remote registry, logging progress to w.
@@ -245,7 +259,7 @@ type PushImageOptions struct {
// An empty instance of AuthConfiguration may be used for unauthenticated
// pushes.
//
-// See https://goo.gl/zPtZaT for more details.
+// See https://goo.gl/BZemGg for more details.
func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error {
if opts.Name == "" {
return ErrNoSuchImage
@@ -258,29 +272,38 @@ func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error
opts.Name = ""
path := "/images/" + name + "/push?" + queryString(&opts)
return c.stream("POST", path, streamOptions{
- setRawTerminal: true,
- rawJSONStream: opts.RawJSONStream,
- headers: headers,
- stdout: opts.OutputStream,
+ setRawTerminal: true,
+ rawJSONStream: opts.RawJSONStream,
+ headers: headers,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
})
}
// PullImageOptions present the set of options available for pulling an image
// from a registry.
//
-// See https://goo.gl/iJkZjD for more details.
+// See https://goo.gl/qkoSsn for more details.
type PullImageOptions struct {
- Repository string `qs:"fromImage"`
- Registry string
- Tag string
- OutputStream io.Writer `qs:"-"`
- RawJSONStream bool `qs:"-"`
+ Repository string `qs:"fromImage"`
+ Tag string
+
+ // Only required for Docker Engine 1.9 or 1.10 w/ Remote API < 1.21
+ // and Docker Engine < 1.9
+ // This parameter was removed in Docker Engine 1.11
+ Registry string
+
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+ Context context.Context
}
// PullImage pulls an image from a remote registry, logging progress to
// opts.OutputStream.
//
-// See https://goo.gl/iJkZjD for more details.
+// See https://goo.gl/qkoSsn for more details.
func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error {
if opts.Repository == "" {
return ErrNoSuchImage
@@ -290,93 +313,106 @@ func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error
if err != nil {
return err
}
- return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream)
+ return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context)
}
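PullImageOptions now exposes both InactivityTimeout and Context. A hedged sketch of a pull that the caller can cancel and that aborts when the progress stream stalls; `client` is assumed to be a *docker.Client configured as in the earlier ListImages sketch, with `os` and `time` imported alongside the same packages:

```go
func pullBusybox(client *docker.Client) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	return client.PullImage(docker.PullImageOptions{
		Repository:        "busybox",
		Tag:               "latest",
		OutputStream:      os.Stdout,
		InactivityTimeout: 30 * time.Second, // give up if no progress data arrives for 30s
		Context:           ctx,              // lets the caller cancel the pull
	}, docker.AuthConfiguration{}) // empty auth for an anonymous pull from a public registry
}
```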
-func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool) error {
+func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool, timeout time.Duration, context context.Context) error {
path := "/images/create?" + qs
return c.stream("POST", path, streamOptions{
- setRawTerminal: true,
- rawJSONStream: rawJSONStream,
- headers: headers,
- in: in,
- stdout: w,
+ setRawTerminal: true,
+ headers: headers,
+ in: in,
+ stdout: w,
+ rawJSONStream: rawJSONStream,
+ inactivityTimeout: timeout,
+ context: context,
})
}
// LoadImageOptions represents the options for LoadImage Docker API Call
//
-// See https://goo.gl/JyClMX for more details.
+// See https://goo.gl/rEsBV3 for more details.
type LoadImageOptions struct {
InputStream io.Reader
+ Context context.Context
}
// LoadImage imports a tarball docker image
//
-// See https://goo.gl/JyClMX for more details.
+// See https://goo.gl/rEsBV3 for more details.
func (c *Client) LoadImage(opts LoadImageOptions) error {
return c.stream("POST", "/images/load", streamOptions{
setRawTerminal: true,
in: opts.InputStream,
+ context: opts.Context,
})
}
// ExportImageOptions represent the options for ExportImage Docker API call.
//
-// See https://goo.gl/le7vK8 for more details.
+// See https://goo.gl/AuySaA for more details.
type ExportImageOptions struct {
- Name string
- OutputStream io.Writer
+ Name string
+ OutputStream io.Writer
+ InactivityTimeout time.Duration
+ Context context.Context
}
// ExportImage exports an image (as a tar file) into the stream.
//
-// See https://goo.gl/le7vK8 for more details.
+// See https://goo.gl/AuySaA for more details.
func (c *Client) ExportImage(opts ExportImageOptions) error {
return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{
- setRawTerminal: true,
- stdout: opts.OutputStream,
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
})
}
// ExportImagesOptions represent the options for ExportImages Docker API call
//
-// See https://goo.gl/huC7HA for more details.
+// See https://goo.gl/N9XlDn for more details.
type ExportImagesOptions struct {
- Names []string
- OutputStream io.Writer `qs:"-"`
+ Names []string
+ OutputStream io.Writer `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+ Context context.Context
}
// ExportImages exports one or more images (as a tar file) into the stream
//
-// See https://goo.gl/huC7HA for more details.
+// See https://goo.gl/N9XlDn for more details.
func (c *Client) ExportImages(opts ExportImagesOptions) error {
if opts.Names == nil || len(opts.Names) == 0 {
return ErrMustSpecifyNames
}
return c.stream("GET", "/images/get?"+queryString(&opts), streamOptions{
- setRawTerminal: true,
- stdout: opts.OutputStream,
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
})
}
 // ImportImageOptions presents the set of information available for importing
// an image from a source file or the stdin.
//
-// See https://goo.gl/iJkZjD for more details.
+// See https://goo.gl/qkoSsn for more details.
type ImportImageOptions struct {
Repository string `qs:"repo"`
Source string `qs:"fromSrc"`
Tag string `qs:"tag"`
- InputStream io.Reader `qs:"-"`
- OutputStream io.Writer `qs:"-"`
- RawJSONStream bool `qs:"-"`
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+ Context context.Context
}
// ImportImage imports an image from a url, a file or stdin
//
-// See https://goo.gl/iJkZjD for more details.
+// See https://goo.gl/qkoSsn for more details.
func (c *Client) ImportImage(opts ImportImageOptions) error {
if opts.Repository == "" {
return ErrNoSuchImage
@@ -392,14 +428,14 @@ func (c *Client) ImportImage(opts ImportImageOptions) error {
opts.InputStream = f
opts.Source = "-"
}
- return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream)
+ return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout, opts.Context)
}
 // BuildImageOptions presents the set of information available for building an
// image from a tarfile with a Dockerfile in it.
//
// For more details about the Docker building process, see
-// http://goo.gl/tlPXPu.
+// https://goo.gl/4nYHwV.
type BuildImageOptions struct {
Name string `qs:"t"`
Dockerfile string `qs:"dockerfile"`
@@ -408,6 +444,7 @@ type BuildImageOptions struct {
Pull bool `qs:"pull"`
RmTmpContainer bool `qs:"rm"`
ForceRmTmpContainer bool `qs:"forcerm"`
+ RawJSONStream bool `qs:"-"`
Memory int64 `qs:"memory"`
Memswap int64 `qs:"memswap"`
CPUShares int64 `qs:"cpushares"`
@@ -416,20 +453,21 @@ type BuildImageOptions struct {
CPUSetCPUs string `qs:"cpusetcpus"`
InputStream io.Reader `qs:"-"`
OutputStream io.Writer `qs:"-"`
- RawJSONStream bool `qs:"-"`
Remote string `qs:"remote"`
Auth AuthConfiguration `qs:"-"` // for older docker X-Registry-Auth header
AuthConfigs AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header
ContextDir string `qs:"-"`
Ulimits []ULimit `qs:"-"`
BuildArgs []BuildArg `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+ Context context.Context
}
// BuildArg represents arguments that can be passed to the image when building
// it from a Dockerfile.
//
// For more details about the Docker building process, see
-// http://goo.gl/tlPXPu.
+// https://goo.gl/4nYHwV.
type BuildArg struct {
Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
Value string `json:"Value,omitempty" yaml:"Value,omitempty"`
@@ -438,7 +476,7 @@ type BuildArg struct {
// BuildImage builds an image from a tarball's url or a Dockerfile in the input
// stream.
//
-// See https://goo.gl/xySxCe for more details.
+// See https://goo.gl/4nYHwV for more details.
func (c *Client) BuildImage(opts BuildImageOptions) error {
if opts.OutputStream == nil {
return ErrMissingOutputStream
@@ -488,11 +526,13 @@ func (c *Client) BuildImage(opts BuildImageOptions) error {
}
return c.stream("POST", fmt.Sprintf("/build?%s", qs), streamOptions{
- setRawTerminal: true,
- rawJSONStream: opts.RawJSONStream,
- headers: headers,
- in: opts.InputStream,
- stdout: opts.OutputStream,
+ setRawTerminal: true,
+ rawJSONStream: opts.RawJSONStream,
+ headers: headers,
+ in: opts.InputStream,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ context: opts.Context,
})
}
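BuildImageOptions gains the same InactivityTimeout and Context plumbing as the pull/push paths. A rough sketch of a build from a local context directory, reusing the imports from the earlier sketches plus `bytes`; the image tag and directory are placeholders:

```go
func buildFromContextDir(client *docker.Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()
	var output bytes.Buffer
	err := client.BuildImage(docker.BuildImageOptions{
		Name:              "example/app:latest", // hypothetical tag
		ContextDir:        ".",                  // directory containing a Dockerfile
		OutputStream:      &output,
		InactivityTimeout: time.Minute, // abort if the build output stalls
		Context:           ctx,
	})
	log.Print(output.String())
	return err
}
```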
@@ -508,22 +548,24 @@ func (c *Client) versionedAuthConfigs(authConfigs AuthConfigurations) interface{
// TagImageOptions present the set of options to tag an image.
//
-// See https://goo.gl/98ZzkU for more details.
+// See https://goo.gl/prHrvo for more details.
type TagImageOptions struct {
- Repo string
- Tag string
- Force bool
+ Repo string
+ Tag string
+ Force bool
+ Context context.Context
}
// TagImage adds a tag to the image identified by the given name.
//
-// See https://goo.gl/98ZzkU for more details.
+// See https://goo.gl/prHrvo for more details.
func (c *Client) TagImage(name string, opts TagImageOptions) error {
if name == "" {
return ErrNoSuchImage
}
- resp, err := c.do("POST", fmt.Sprintf("/images/"+name+"/tag?%s",
- queryString(&opts)), doOptions{})
+ resp, err := c.do("POST", "/images/"+name+"/tag?"+queryString(&opts), doOptions{
+ context: opts.Context,
+ })
if err != nil {
return err
@@ -571,7 +613,7 @@ func headersWithAuth(auths ...interface{}) (map[string]string, error) {
// APIImageSearch reflect the result of a search on the Docker Hub.
//
-// See https://goo.gl/AYjyrF for more details.
+// See https://goo.gl/KLO9IZ for more details.
type APIImageSearch struct {
Description string `json:"description,omitempty" yaml:"description,omitempty"`
IsOfficial bool `json:"is_official,omitempty" yaml:"is_official,omitempty"`
@@ -582,13 +624,13 @@ type APIImageSearch struct {
// SearchImages search the docker hub with a specific given term.
//
-// See https://goo.gl/AYjyrF for more details.
+// See https://goo.gl/KLO9IZ for more details.
func (c *Client) SearchImages(term string) ([]APIImageSearch, error) {
resp, err := c.do("GET", "/images/search?term="+term, doOptions{})
- defer resp.Body.Close()
if err != nil {
return nil, err
}
+ defer resp.Body.Close()
var searchResult []APIImageSearch
if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil {
return nil, err
@@ -598,7 +640,7 @@ func (c *Client) SearchImages(term string) ([]APIImageSearch, error) {
// SearchImagesEx search the docker hub with a specific given term and authentication.
//
-// See https://goo.gl/AYjyrF for more details.
+// See https://goo.gl/KLO9IZ for more details.
func (c *Client) SearchImagesEx(term string, auth AuthConfiguration) ([]APIImageSearch, error) {
headers, err := headersWithAuth(auth)
if err != nil {
diff --git a/vendor/github.com/fsouza/go-dockerclient/misc.go b/vendor/github.com/fsouza/go-dockerclient/misc.go
index ce9e9750b..aef595fe1 100644
--- a/vendor/github.com/fsouza/go-dockerclient/misc.go
+++ b/vendor/github.com/fsouza/go-dockerclient/misc.go
@@ -1,4 +1,4 @@
-// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Copyright 2013 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -6,12 +6,15 @@ package docker
import (
"encoding/json"
+ "net"
"strings"
+
+ "github.com/docker/docker/api/types/swarm"
)
// Version returns version information about the docker server.
//
-// See https://goo.gl/ND9R8L for more details.
+// See https://goo.gl/mU7yje for more details.
func (c *Client) Version() (*Env, error) {
resp, err := c.do("GET", "/version", doOptions{})
if err != nil {
@@ -50,8 +53,9 @@ type DockerInfo struct {
BridgeNfIptables bool
BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
Debug bool
- NFd int
OomKillDisable bool
+ ExperimentalBuild bool
+ NFd int
NGoroutines int
SystemTime string
ExecutionDriver string
@@ -63,6 +67,7 @@ type DockerInfo struct {
OSType string
Architecture string
IndexServerAddress string
+ RegistryConfig *ServiceConfig
NCPU int
MemTotal int64
DockerRootDir string
@@ -71,10 +76,10 @@ type DockerInfo struct {
NoProxy string
Name string
Labels []string
- ExperimentalBuild bool
ServerVersion string
ClusterStore string
ClusterAdvertise string
+ Swarm swarm.Info
}
// PluginsInfo is a struct with the plugins registered with the docker daemon
@@ -89,6 +94,50 @@ type PluginsInfo struct {
Authorization []string
}
+// ServiceConfig stores daemon registry services configuration.
+//
+// For more information, see: https://goo.gl/7iFFDz
+type ServiceConfig struct {
+ InsecureRegistryCIDRs []*NetIPNet
+ IndexConfigs map[string]*IndexInfo
+ Mirrors []string
+}
+
+// NetIPNet is the net.IPNet type, which can be marshalled and
+// unmarshalled to JSON.
+//
+// For more information, see: https://goo.gl/7iFFDz
+type NetIPNet net.IPNet
+
+// MarshalJSON returns the JSON representation of the IPNet.
+//
+func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
+ return json.Marshal((*net.IPNet)(ipnet).String())
+}
+
+// UnmarshalJSON sets the IPNet from a byte array of JSON.
+//
+func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
+ var ipnetStr string
+ if err = json.Unmarshal(b, &ipnetStr); err == nil {
+ var cidr *net.IPNet
+ if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
+ *ipnet = NetIPNet(*cidr)
+ }
+ }
+ return
+}
+
+// IndexInfo contains information about a registry.
+//
+// For more information, see: https://goo.gl/7iFFDz
+type IndexInfo struct {
+ Name string
+ Mirrors []string
+ Secure bool
+ Official bool
+}
+
// Info returns system-wide information about the Docker server.
//
// See https://goo.gl/ElTHi2 for more details.
@@ -106,13 +155,17 @@ func (c *Client) Info() (*DockerInfo, error) {
}
 // ParseRepositoryTag gets the name of the repository and returns it split
-// in two parts: the repository and the tag.
+// in two parts: the repository and the tag. It ignores the digest when it is
+// present.
//
// Some examples:
//
// localhost.localdomain:5000/samalba/hipache:latest -> localhost.localdomain:5000/samalba/hipache, latest
// localhost.localdomain:5000/samalba/hipache -> localhost.localdomain:5000/samalba/hipache, ""
+// busybox:latest@sha256:4a731fb46adc5cefe3ae374a8b6020fc1b6ad667a279647766e9a3cd89f6fa92 -> busybox, latest
func ParseRepositoryTag(repoTag string) (repository string, tag string) {
+ parts := strings.SplitN(repoTag, "@", 2)
+ repoTag = parts[0]
n := strings.LastIndex(repoTag, ":")
if n < 0 {
return repoTag, ""
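The new digest handling in ParseRepositoryTag can be exercised directly; this tiny sketch just mirrors the examples from the doc comment above:

```go
repo, tag := docker.ParseRepositoryTag("busybox:latest@sha256:4a731fb46adc5cefe3ae374a8b6020fc1b6ad667a279647766e9a3cd89f6fa92")
fmt.Println(repo, tag) // prints "busybox latest"; the digest after '@' is ignored
```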
diff --git a/vendor/github.com/fsouza/go-dockerclient/network.go b/vendor/github.com/fsouza/go-dockerclient/network.go
index b72e91a07..997092835 100644
--- a/vendor/github.com/fsouza/go-dockerclient/network.go
+++ b/vendor/github.com/fsouza/go-dockerclient/network.go
@@ -5,11 +5,12 @@
package docker
import (
- "bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
+
+ "golang.org/x/net/context"
)
// ErrNetworkAlreadyExists is the error returned by CreateNetwork when the
@@ -28,6 +29,8 @@ type Network struct {
Containers map[string]Endpoint
Options map[string]string
Internal bool
+ EnableIPv6 bool `json:"EnableIPv6"`
+ Labels map[string]string
}
// Endpoint contains network resources allocated and used for a container in a network
@@ -65,11 +68,11 @@ type NetworkFilterOpts map[string]map[string]bool
//
// See goo.gl/zd2mx4 for more details.
func (c *Client) FilteredListNetworks(opts NetworkFilterOpts) ([]Network, error) {
- params := bytes.NewBuffer(nil)
- if err := json.NewEncoder(params).Encode(&opts); err != nil {
+ params, err := json.Marshal(opts)
+ if err != nil {
return nil, err
}
- path := "/networks?filters=" + params.String()
+ path := "/networks?filters=" + string(params)
resp, err := c.do("GET", path, doOptions{})
if err != nil {
return nil, err
@@ -107,19 +110,23 @@ func (c *Client) NetworkInfo(id string) (*Network, error) {
//
// See https://goo.gl/6GugX3 for more details.
type CreateNetworkOptions struct {
- Name string `json:"Name"`
- CheckDuplicate bool `json:"CheckDuplicate"`
- Driver string `json:"Driver"`
- IPAM IPAMOptions `json:"IPAM"`
- Options map[string]interface{} `json:"options"`
+ Name string `json:"Name" yaml:"Name"`
+ Driver string `json:"Driver" yaml:"Driver"`
+ IPAM IPAMOptions `json:"IPAM" yaml:"IPAM"`
+ Options map[string]interface{} `json:"Options" yaml:"Options"`
+ Labels map[string]string `json:"Labels" yaml:"Labels"`
+ CheckDuplicate bool `json:"CheckDuplicate" yaml:"CheckDuplicate"`
+ Internal bool `json:"Internal" yaml:"Internal"`
+ EnableIPv6 bool `json:"EnableIPv6" yaml:"EnableIPv6"`
+ Context context.Context `json:"-"`
}
// IPAMOptions controls IP Address Management when creating a network
//
// See https://goo.gl/T8kRVH for more details.
type IPAMOptions struct {
- Driver string `json:"Driver"`
- Config []IPAMConfig `json:"IPAMConfig"`
+ Driver string `json:"Driver" yaml:"Driver"`
+ Config []IPAMConfig `json:"Config" yaml:"Config"`
}
// IPAMConfig represents IPAM configurations
@@ -141,7 +148,8 @@ func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) {
"POST",
"/networks/create",
doOptions{
- data: opts,
+ data: opts,
+ context: opts.Context,
},
)
if err != nil {
@@ -197,15 +205,26 @@ type NetworkConnectionOptions struct {
// Force is only applicable to the DisconnectNetwork call
Force bool
+
+ Context context.Context `json:"-"`
}
// EndpointConfig stores network endpoint details
//
// See https://goo.gl/RV7BJU for more details.
type EndpointConfig struct {
- IPAMConfig *EndpointIPAMConfig
- Links []string
- Aliases []string
+ IPAMConfig *EndpointIPAMConfig `json:"IPAMConfig,omitempty" yaml:"IPAMConfig,omitempty"`
+ Links []string `json:"Links,omitempty" yaml:"Links,omitempty"`
+ Aliases []string `json:"Aliases,omitempty" yaml:"Aliases,omitempty"`
+ NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty"`
+ EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty"`
+ Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty"`
+ IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"`
+ IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"`
+ IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty"`
+ GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty"`
+ GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty"`
+ MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
}
// EndpointIPAMConfig represents IPAM configurations for an
@@ -222,7 +241,10 @@ type EndpointIPAMConfig struct {
//
// See https://goo.gl/6GugX3 for more details.
func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error {
- resp, err := c.do("POST", "/networks/"+id+"/connect", doOptions{data: opts})
+ resp, err := c.do("POST", "/networks/"+id+"/connect", doOptions{
+ data: opts,
+ context: opts.Context,
+ })
if err != nil {
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container}
diff --git a/vendor/github.com/fsouza/go-dockerclient/node.go b/vendor/github.com/fsouza/go-dockerclient/node.go
new file mode 100644
index 000000000..092fb6716
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/node.go
@@ -0,0 +1,128 @@
+// Copyright 2016 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NoSuchNode is the error returned when a given node does not exist.
+type NoSuchNode struct {
+ ID string
+ Err error
+}
+
+func (err *NoSuchNode) Error() string {
+ if err.Err != nil {
+ return err.Err.Error()
+ }
+ return "No such node: " + err.ID
+}
+
+// ListNodesOptions specify parameters to the ListNodes function.
+//
+// See http://goo.gl/3K4GwU for more details.
+type ListNodesOptions struct {
+ Filters map[string][]string
+ Context context.Context
+}
+
+// ListNodes returns a slice of nodes matching the given criteria.
+//
+// See http://goo.gl/3K4GwU for more details.
+func (c *Client) ListNodes(opts ListNodesOptions) ([]swarm.Node, error) {
+ path := "/nodes?" + queryString(opts)
+ resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var nodes []swarm.Node
+ if err := json.NewDecoder(resp.Body).Decode(&nodes); err != nil {
+ return nil, err
+ }
+ return nodes, nil
+}
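ListNodes accepts the same Filters map used elsewhere in the client. A sketch that lists only manager nodes; the "role" filter key follows the Docker remote API and is an assumption of this example, not something this diff guarantees (imports: the vendored go-dockerclient and the docker/docker swarm types):

```go
func listManagers(client *docker.Client) ([]swarm.Node, error) {
	return client.ListNodes(docker.ListNodesOptions{
		Filters: map[string][]string{"role": {"manager"}}, // assumed remote-API filter key
	})
}
```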
+
+// InspectNode returns information about a node by its ID.
+//
+// See http://goo.gl/WjkTOk for more details.
+func (c *Client) InspectNode(id string) (*swarm.Node, error) {
+ resp, err := c.do("GET", "/nodes/"+id, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchNode{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var node swarm.Node
+ if err := json.NewDecoder(resp.Body).Decode(&node); err != nil {
+ return nil, err
+ }
+ return &node, nil
+}
+
+// UpdateNodeOptions specify parameters to the NodeUpdate function.
+//
+// See http://goo.gl/VPBFgA for more details.
+type UpdateNodeOptions struct {
+ swarm.NodeSpec
+ Version uint64
+ Context context.Context
+}
+
+// UpdateNode updates a node.
+//
+// See http://goo.gl/VPBFgA for more details.
+func (c *Client) UpdateNode(id string, opts UpdateNodeOptions) error {
+ params := make(url.Values)
+ params.Set("version", strconv.FormatUint(opts.Version, 10))
+ path := "/nodes/" + id + "/update?" + params.Encode()
+ _, err := c.do("POST", path, doOptions{
+ context: opts.Context,
+ forceJSON: true,
+ data: opts.NodeSpec,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchNode{ID: id}
+ }
+ return err
+ }
+ return nil
+}
+
+// RemoveNodeOptions specify parameters to the RemoveNode function.
+//
+// See http://goo.gl/0SNvYg for more details.
+type RemoveNodeOptions struct {
+ ID string
+ Force bool
+ Context context.Context
+}
+
+// RemoveNode removes a node.
+//
+// See http://goo.gl/0SNvYg for more details.
+func (c *Client) RemoveNode(opts RemoveNodeOptions) error {
+ params := make(url.Values)
+ params.Set("force", strconv.FormatBool(opts.Force))
+ path := "/nodes/" + opts.ID + "?" + params.Encode()
+ _, err := c.do("DELETE", path, doOptions{context: opts.Context})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchNode{ID: opts.ID}
+ }
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/service.go b/vendor/github.com/fsouza/go-dockerclient/service.go
new file mode 100644
index 000000000..3daf59c5d
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/service.go
@@ -0,0 +1,157 @@
+// Copyright 2016 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NoSuchService is the error returned when a given service does not exist.
+type NoSuchService struct {
+ ID string
+ Err error
+}
+
+func (err *NoSuchService) Error() string {
+ if err.Err != nil {
+ return err.Err.Error()
+ }
+ return "No such service: " + err.ID
+}
+
+// CreateServiceOptions specify parameters to the CreateService function.
+//
+// See https://goo.gl/KrVjHz for more details.
+type CreateServiceOptions struct {
+ swarm.ServiceSpec
+ Context context.Context
+}
+
+// CreateService creates a new service, returning the service instance
+// or an error in case of failure.
+//
+// See https://goo.gl/KrVjHz for more details.
+func (c *Client) CreateService(opts CreateServiceOptions) (*swarm.Service, error) {
+ path := "/services/create?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{
+ data: opts.ServiceSpec,
+ forceJSON: true,
+ context: opts.Context,
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var service swarm.Service
+ if err := json.NewDecoder(resp.Body).Decode(&service); err != nil {
+ return nil, err
+ }
+ return &service, nil
+}
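CreateServiceOptions embeds swarm.ServiceSpec from the vendored docker/docker types. A hedged sketch of creating a single service; the spec field names follow those vendored types and are assumptions of this example:

```go
func createWebService(client *docker.Client) (*swarm.Service, error) {
	return client.CreateService(docker.CreateServiceOptions{
		ServiceSpec: swarm.ServiceSpec{
			Annotations: swarm.Annotations{Name: "web"}, // service name
			TaskTemplate: swarm.TaskSpec{
				ContainerSpec: swarm.ContainerSpec{Image: "nginx:alpine"},
			},
		},
	})
}
```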
+
+// RemoveServiceOptions encapsulates options to remove a service.
+//
+// See https://goo.gl/Tqrtya for more details.
+type RemoveServiceOptions struct {
+ ID string `qs:"-"`
+ Context context.Context
+}
+
+// RemoveService removes a service, returning an error in case of failure.
+//
+// See https://goo.gl/Tqrtya for more details.
+func (c *Client) RemoveService(opts RemoveServiceOptions) error {
+ path := "/services/" + opts.ID
+ resp, err := c.do("DELETE", path, doOptions{context: opts.Context})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchService{ID: opts.ID}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// UpdateServiceOptions specify parameters to the UpdateService function.
+//
+// See https://goo.gl/wu3MmS for more details.
+type UpdateServiceOptions struct {
+ swarm.ServiceSpec
+ Context context.Context
+ Version uint64
+}
+
+// UpdateService updates the service identified by id with the given options.
+//
+// See https://goo.gl/wu3MmS for more details.
+func (c *Client) UpdateService(id string, opts UpdateServiceOptions) error {
+ params := make(url.Values)
+ params.Set("version", strconv.FormatUint(opts.Version, 10))
+ resp, err := c.do("POST", "/services/"+id+"/update?"+params.Encode(), doOptions{
+ data: opts.ServiceSpec,
+ forceJSON: true,
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchService{ID: id}
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ return nil
+}
+
+// InspectService returns information about a service by its ID.
+//
+// See https://goo.gl/dHmr75 for more details.
+func (c *Client) InspectService(id string) (*swarm.Service, error) {
+ path := "/services/" + id
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchService{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var service swarm.Service
+ if err := json.NewDecoder(resp.Body).Decode(&service); err != nil {
+ return nil, err
+ }
+ return &service, nil
+}
+
+// ListServicesOptions specify parameters to the ListServices function.
+//
+// See https://goo.gl/DwvNMd for more details.
+type ListServicesOptions struct {
+ Filters map[string][]string
+ Context context.Context
+}
+
+// ListServices returns a slice of services matching the given criteria.
+//
+// See https://goo.gl/DwvNMd for more details.
+func (c *Client) ListServices(opts ListServicesOptions) ([]swarm.Service, error) {
+ path := "/services?" + queryString(opts)
+ resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var services []swarm.Service
+ if err := json.NewDecoder(resp.Body).Decode(&services); err != nil {
+ return nil, err
+ }
+ return services, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/swarm.go b/vendor/github.com/fsouza/go-dockerclient/swarm.go
new file mode 100644
index 000000000..0f8c4eb45
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/swarm.go
@@ -0,0 +1,150 @@
+// Copyright 2016 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "errors"
+ "net/http"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+var (
+ // ErrNodeAlreadyInSwarm is the error returned by InitSwarm and JoinSwarm
+ // when the node is already part of a Swarm.
+ ErrNodeAlreadyInSwarm = errors.New("node already in a Swarm")
+
+ // ErrNodeNotInSwarm is the error returned by LeaveSwarm and UpdateSwarm
+ // when the node is not part of a Swarm.
+ ErrNodeNotInSwarm = errors.New("node is not in a Swarm")
+)
+
+// InitSwarmOptions specify parameters to the InitSwarm function.
+// See https://goo.gl/hzkgWu for more details.
+type InitSwarmOptions struct {
+ swarm.InitRequest
+ Context context.Context
+}
+
+// InitSwarm initializes a new Swarm and returns the node ID.
+// See https://goo.gl/hzkgWu for more details.
+func (c *Client) InitSwarm(opts InitSwarmOptions) (string, error) {
+ path := "/swarm/init"
+ resp, err := c.do("POST", path, doOptions{
+ data: opts.InitRequest,
+ forceJSON: true,
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotAcceptable {
+ return "", ErrNodeAlreadyInSwarm
+ }
+ return "", err
+ }
+ defer resp.Body.Close()
+ var response string
+ if err := json.NewDecoder(resp.Body).Decode(&response); err != nil {
+ return "", err
+ }
+ return response, nil
+}
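InitSwarm posts a swarm.InitRequest and decodes the returned node ID. A sketch of bootstrapping a single-node swarm; the ListenAddr value and the InitRequest field name come from the vendored swarm types and are assumptions here:

```go
func bootstrapSwarm(client *docker.Client) (string, error) {
	return client.InitSwarm(docker.InitSwarmOptions{
		InitRequest: swarm.InitRequest{
			ListenAddr: "0.0.0.0:2377", // conventional swarm management port
		},
	})
}
```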
+
+// JoinSwarmOptions specify parameters to the JoinSwarm function.
+// See https://goo.gl/TdhJWU for more details.
+type JoinSwarmOptions struct {
+ swarm.JoinRequest
+ Context context.Context
+}
+
+// JoinSwarm joins an existing Swarm.
+// See https://goo.gl/TdhJWU for more details.
+func (c *Client) JoinSwarm(opts JoinSwarmOptions) error {
+ path := "/swarm/join"
+ _, err := c.do("POST", path, doOptions{
+ data: opts.JoinRequest,
+ forceJSON: true,
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotAcceptable {
+ return ErrNodeAlreadyInSwarm
+ }
+ }
+ return err
+}
+
+// LeaveSwarmOptions specify parameters to the LeaveSwarm function.
+// See https://goo.gl/UWDlLg for more details.
+type LeaveSwarmOptions struct {
+ Force bool
+ Context context.Context
+}
+
+// LeaveSwarm leaves a Swarm.
+// See https://goo.gl/UWDlLg for more details.
+func (c *Client) LeaveSwarm(opts LeaveSwarmOptions) error {
+ params := make(url.Values)
+ params.Set("force", strconv.FormatBool(opts.Force))
+ path := "/swarm/leave?" + params.Encode()
+ _, err := c.do("POST", path, doOptions{
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotAcceptable {
+ return ErrNodeNotInSwarm
+ }
+ }
+ return err
+}
+
+// UpdateSwarmOptions specify parameters to the UpdateSwarm function.
+// See https://goo.gl/vFbq36 for more details.
+type UpdateSwarmOptions struct {
+ Version int
+ RotateWorkerToken bool
+ RotateManagerToken bool
+ Swarm swarm.Spec
+ Context context.Context
+}
+
+// UpdateSwarm updates a Swarm.
+// See https://goo.gl/vFbq36 for more details.
+func (c *Client) UpdateSwarm(opts UpdateSwarmOptions) error {
+ params := make(url.Values)
+ params.Set("version", strconv.Itoa(opts.Version))
+ params.Set("rotateWorkerToken", strconv.FormatBool(opts.RotateWorkerToken))
+ params.Set("rotateManagerToken", strconv.FormatBool(opts.RotateManagerToken))
+ path := "/swarm/update?" + params.Encode()
+ _, err := c.do("POST", path, doOptions{
+ data: opts.Swarm,
+ forceJSON: true,
+ context: opts.Context,
+ })
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotAcceptable {
+ return ErrNodeNotInSwarm
+ }
+ }
+ return err
+}
+
+// InspectSwarm inspects a Swarm.
+// See http://goo.gl/nvwytL for more details.
+func (c *Client) InspectSwarm(ctx context.Context) (swarm.Swarm, error) {
+ response := swarm.Swarm{}
+ resp, err := c.do("GET", "/swarm", doOptions{
+ context: ctx,
+ })
+ if err != nil {
+ return response, err
+ }
+ defer resp.Body.Close()
+ err = json.NewDecoder(resp.Body).Decode(&response)
+ return response, err
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/tar.go b/vendor/github.com/fsouza/go-dockerclient/tar.go
index 48042cbda..be4dfa573 100644
--- a/vendor/github.com/fsouza/go-dockerclient/tar.go
+++ b/vendor/github.com/fsouza/go-dockerclient/tar.go
@@ -1,4 +1,4 @@
-// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Copyright 2014 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -13,8 +13,8 @@ import (
"path/filepath"
"strings"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils"
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/fileutils"
)
func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) {
@@ -67,10 +67,10 @@ func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) {
func validateContextDirectory(srcPath string, excludes []string) error {
return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error {
// skip this directory/file if it's not in the path, it won't get added to the context
- if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil {
- return err
- } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
- return err
+ if relFilePath, relErr := filepath.Rel(srcPath, filePath); relErr != nil {
+ return relErr
+ } else if skip, matchErr := fileutils.Matches(relFilePath, excludes); matchErr != nil {
+ return matchErr
} else if skip {
if f.IsDir() {
return filepath.SkipDir
diff --git a/vendor/github.com/fsouza/go-dockerclient/task.go b/vendor/github.com/fsouza/go-dockerclient/task.go
new file mode 100644
index 000000000..b1dad4b23
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/task.go
@@ -0,0 +1,70 @@
+// Copyright 2016 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NoSuchTask is the error returned when a given task does not exist.
+type NoSuchTask struct {
+ ID string
+ Err error
+}
+
+func (err *NoSuchTask) Error() string {
+ if err.Err != nil {
+ return err.Err.Error()
+ }
+ return "No such task: " + err.ID
+}
+
+// ListTasksOptions specify parameters to the ListTasks function.
+//
+// See http://goo.gl/rByLzw for more details.
+type ListTasksOptions struct {
+ Filters map[string][]string
+ Context context.Context
+}
+
+// ListTasks returns a slice of tasks matching the given criteria.
+//
+// See http://goo.gl/rByLzw for more details.
+func (c *Client) ListTasks(opts ListTasksOptions) ([]swarm.Task, error) {
+ path := "/tasks?" + queryString(opts)
+ resp, err := c.do("GET", path, doOptions{context: opts.Context})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var tasks []swarm.Task
+ if err := json.NewDecoder(resp.Body).Decode(&tasks); err != nil {
+ return nil, err
+ }
+ return tasks, nil
+}
+
+// InspectTask returns information about a task by its ID.
+//
+// See http://goo.gl/kyziuq for more details.
+func (c *Client) InspectTask(id string) (*swarm.Task, error) {
+ resp, err := c.do("GET", "/tasks/"+id, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchTask{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var task swarm.Task
+ if err := json.NewDecoder(resp.Body).Decode(&task); err != nil {
+ return nil, err
+ }
+ return &task, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/tls.go b/vendor/github.com/fsouza/go-dockerclient/tls.go
index 55f43174b..bb5790b5f 100644
--- a/vendor/github.com/fsouza/go-dockerclient/tls.go
+++ b/vendor/github.com/fsouza/go-dockerclient/tls.go
@@ -68,9 +68,8 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con
// from the hostname we're connecting to.
if config.ServerName == "" {
// Make a copy to avoid polluting argument or default.
- c := *config
- c.ServerName = hostname
- config = &c
+ config = copyTLSConfig(config)
+ config.ServerName = hostname
}
conn := tls.Client(rawConn, config)
@@ -94,3 +93,26 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con
// wrapper which holds both the TLS and raw connections.
return &tlsClientCon{conn, rawConn}, nil
}
+
+// This exists to silence an error message reported by go vet.
+func copyTLSConfig(cfg *tls.Config) *tls.Config {
+ return &tls.Config{
+ Certificates: cfg.Certificates,
+ CipherSuites: cfg.CipherSuites,
+ ClientAuth: cfg.ClientAuth,
+ ClientCAs: cfg.ClientCAs,
+ ClientSessionCache: cfg.ClientSessionCache,
+ CurvePreferences: cfg.CurvePreferences,
+ InsecureSkipVerify: cfg.InsecureSkipVerify,
+ MaxVersion: cfg.MaxVersion,
+ MinVersion: cfg.MinVersion,
+ NameToCertificate: cfg.NameToCertificate,
+ NextProtos: cfg.NextProtos,
+ PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+ Rand: cfg.Rand,
+ RootCAs: cfg.RootCAs,
+ ServerName: cfg.ServerName,
+ SessionTicketKey: cfg.SessionTicketKey,
+ SessionTicketsDisabled: cfg.SessionTicketsDisabled,
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/volume.go b/vendor/github.com/fsouza/go-dockerclient/volume.go
index 0e57cb122..e262cb557 100644
--- a/vendor/github.com/fsouza/go-dockerclient/volume.go
+++ b/vendor/github.com/fsouza/go-dockerclient/volume.go
@@ -8,6 +8,8 @@ import (
"encoding/json"
"errors"
"net/http"
+
+ "golang.org/x/net/context"
)
var (
@@ -22,9 +24,10 @@ var (
//
// See https://goo.gl/FZA4BK for more details.
type Volume struct {
- Name string `json:"Name" yaml:"Name"`
- Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"`
- Mountpoint string `json:"Mountpoint,omitempty" yaml:"Mountpoint,omitempty"`
+ Name string `json:"Name" yaml:"Name"`
+ Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"`
+ Mountpoint string `json:"Mountpoint,omitempty" yaml:"Mountpoint,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
}
// ListVolumesOptions specify parameters to the ListVolumes function.
@@ -32,19 +35,22 @@ type Volume struct {
// See https://goo.gl/FZA4BK for more details.
type ListVolumesOptions struct {
Filters map[string][]string
+ Context context.Context
}
// ListVolumes returns a list of available volumes in the server.
//
// See https://goo.gl/FZA4BK for more details.
func (c *Client) ListVolumes(opts ListVolumesOptions) ([]Volume, error) {
- resp, err := c.do("GET", "/volumes?"+queryString(opts), doOptions{})
+ resp, err := c.do("GET", "/volumes?"+queryString(opts), doOptions{
+ context: opts.Context,
+ })
if err != nil {
return nil, err
}
defer resp.Body.Close()
m := make(map[string]interface{})
- if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
+ if err = json.NewDecoder(resp.Body).Decode(&m); err != nil {
return nil, err
}
var volumes []Volume
@@ -69,13 +75,18 @@ type CreateVolumeOptions struct {
Name string
Driver string
DriverOpts map[string]string
+ Context context.Context `json:"-"`
+ Labels map[string]string
}
// CreateVolume creates a volume on the server.
//
// See https://goo.gl/pBUbZ9 for more details.
func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) {
- resp, err := c.do("POST", "/volumes/create", doOptions{data: opts})
+ resp, err := c.do("POST", "/volumes/create", doOptions{
+ data: opts,
+ context: opts.Context,
+ })
if err != nil {
return nil, err
}
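CreateVolumeOptions now accepts Labels alongside the driver settings and an optional Context. A small sketch with placeholder names:

```go
func createDataVolume(client *docker.Client) (*docker.Volume, error) {
	return client.CreateVolume(docker.CreateVolumeOptions{
		Name:   "app-data", // placeholder volume name
		Driver: "local",
		Labels: map[string]string{"env": "test"}, // placeholder label
	})
}
```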
diff --git a/vendor/github.com/go-ole/go-ole/ChangeLog.md b/vendor/github.com/go-ole/go-ole/ChangeLog.md
new file mode 100644
index 000000000..4ba6a8c64
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/ChangeLog.md
@@ -0,0 +1,49 @@
+# Version 1.x.x
+
+* **Add more test cases and reference new test COM server project.** (Placeholder for future additions)
+
+# Version 1.2.0-alphaX
+
+**Minimum supported version is now Go 1.4. Go 1.1 support is deprecated, but should still build.**
+
+ * Added CI configuration for Travis-CI and AppVeyor.
+ * Added test InterfaceID and ClassID for the COM Test Server project.
+ * Added more inline documentation (#83).
+ * Added IEnumVARIANT implementation (#88).
+ * Added IEnumVARIANT test cases (#99, #100, #101).
+ * Added support for retrieving `time.Time` from VARIANT (#92).
+ * Added test case for IUnknown (#64).
+ * Added test case for IDispatch (#64).
+ * Added test cases for scalar variants (#64, #76).
+
+# Version 1.1.1
+
+ * Fixes for Linux build.
+ * Fixes for Windows build.
+
+# Version 1.1.0
+
+The change to provide building on all platforms is a new feature. The increase in minor version reflects that and allows those who wish to stay on 1.0.x to continue to do so. Support for 1.0.x will be limited to bug fixes.
+
+ * Move GUID out of variables.go into its own file to make new documentation available.
+ * Move OleError out of ole.go into its own file to make new documentation available.
+ * Add documentation to utility functions.
+ * Add documentation to variant receiver functions.
+ * Add documentation to ole structures.
+ * Make variant available to other systems outside of Windows.
+ * Make OLE structures available to other systems outside of Windows.
+
+## New Features
+
+ * The library now builds on all platforms supported by Go and is a no-op on any platform other than Windows.
+ * More functions are now documented and available on godoc.org.
+
+# Version 1.0.1
+
+ 1. Fix package references from repository location change.
+
+# Version 1.0.0
+
+This version is stable enough for use. The COM API is still incomplete, but provides enough functionality for accessing COM servers using the IDispatch interface.
+
+There is no changelog for this version. Check commits for history.
diff --git a/vendor/github.com/go-ole/go-ole/README.md b/vendor/github.com/go-ole/go-ole/README.md
new file mode 100644
index 000000000..0ea9db33c
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/README.md
@@ -0,0 +1,46 @@
+# Go OLE
+
+[![Build status](https://ci.appveyor.com/api/projects/status/qr0u2sf7q43us9fj?svg=true)](https://ci.appveyor.com/project/jacobsantos/go-ole-jgs28)
+[![Build Status](https://travis-ci.org/go-ole/go-ole.svg?branch=master)](https://travis-ci.org/go-ole/go-ole)
+[![GoDoc](https://godoc.org/github.com/go-ole/go-ole?status.svg)](https://godoc.org/github.com/go-ole/go-ole)
+
+Go bindings for Windows COM using shared libraries instead of cgo.
+
+By Yasuhiro Matsumoto.
+
+## Install
+
+To experiment with go-ole, you can just compile and run the example program:
+
+```
+go get github.com/go-ole/go-ole
+cd /path/to/go-ole/
+go test
+
+cd /path/to/go-ole/example/excel
+go run excel.go
+```
+
+## Continuous Integration
+
+Continuous integration configuration has been added for both Travis-CI and AppVeyor. You will have to enable them for your own fork in order for the builds to run.
+
+**Travis-CI**
+
+Travis-CI was added to check builds on Linux and to ensure that `go get` works when cross-building. Travis-CI is not currently used to test cross-building, though this may change in the future. It is also not possible to test the library itself on Linux, since the COM API is specific to Windows and there is currently no way to run a COM server on Linux or even connect to a remote one.
+
+**AppVeyor**
+
+AppVeyor is used to build on Windows using the (in-development) test COM server. It is currently only used to test the build and ensure that the code works on Windows. It will be used to register a COM server and then run the test cases based on the test COM server.
+
+The tests currently run and pass, and this should be maintained with future commits.
+
+## Versioning
+
+Go OLE uses [semantic versioning](http://semver.org) for version numbers, which is similar to the version contract of the Go language. This means that the major version will always maintain backwards compatibility with minor versions; minor versions will only add new features and changes, and fixes will always land in patch releases.
+
+This contract should allow you to upgrade to new minor and patch versions without breakage or modifications to your existing code. If something does break, please open a ticket so that it can be fixed.
+
+## LICENSE
+
+Under the MIT License: http://mattn.mit-license.org/2013
diff --git a/vendor/github.com/go-ole/go-ole/appveyor.yml b/vendor/github.com/go-ole/go-ole/appveyor.yml
new file mode 100644
index 000000000..e66dd31a1
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/appveyor.yml
@@ -0,0 +1,63 @@
+# Notes:
+# - Minimal appveyor.yml file is an empty file. All sections are optional.
+# - Indent each level of configuration with 2 spaces. Do not use tabs!
+# - All section names are case-sensitive.
+# - Section names should be unique on each level.
+
+version: "1.3.0.{build}-alpha-{branch}"
+
+os: Windows Server 2012 R2
+
+branches:
+ only:
+ - master
+ - v1.2
+ - v1.1
+ - v1.0
+
+skip_tags: true
+
+clone_folder: c:\gopath\src\github.com\go-ole\go-ole
+
+environment:
+ GOPATH: c:\gopath
+ matrix:
+ - GOARCH: amd64
+ GOVERSION: 1.4
+ GOROOT: c:\go
+ DOWNLOADPLATFORM: "x64"
+
+install:
+ - choco install mingw
+ - SET PATH=c:\tools\mingw64\bin;%PATH%
+ # - Download COM Server
+ - ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.2/test-com-server-${env:DOWNLOADPLATFORM}.zip"
+ - 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL
+ - c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat
+ # - set
+ - go version
+ - go env
+ - c:\gopath\src\github.com\go-ole\go-ole\build\compile-go.bat
+ - go tool dist install -v cmd/8a
+ - go tool dist install -v cmd/8c
+ - go tool dist install -v cmd/8g
+ - go tool dist install -v cmd/8l
+ - go tool dist install -v cmd/6a
+ - go tool dist install -v cmd/6c
+ - go tool dist install -v cmd/6g
+ - go tool dist install -v cmd/6l
+ - go get -u golang.org/x/tools/cmd/cover
+ - go get -u golang.org/x/tools/cmd/godoc
+ - go get -u golang.org/x/tools/cmd/stringer
+
+build_script:
+ - cd c:\gopath\src\github.com\go-ole\go-ole
+ - go get -v -t ./...
+ - go build
+ - go test -v -cover ./...
+
+# disable automatic tests
+test: off
+
+# disable deployment
+deploy: off
diff --git a/vendor/github.com/go-ole/go-ole/com.go b/vendor/github.com/go-ole/go-ole/com.go
new file mode 100644
index 000000000..f224fa069
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/com.go
@@ -0,0 +1,329 @@
+// +build windows
+
+package ole
+
+import (
+ "errors"
+ "syscall"
+ "time"
+ "unicode/utf16"
+ "unsafe"
+)
+
+var (
+ procCoInitialize, _ = modole32.FindProc("CoInitialize")
+ procCoInitializeEx, _ = modole32.FindProc("CoInitializeEx")
+ procCoUninitialize, _ = modole32.FindProc("CoUninitialize")
+ procCoCreateInstance, _ = modole32.FindProc("CoCreateInstance")
+ procCoTaskMemFree, _ = modole32.FindProc("CoTaskMemFree")
+ procCLSIDFromProgID, _ = modole32.FindProc("CLSIDFromProgID")
+ procCLSIDFromString, _ = modole32.FindProc("CLSIDFromString")
+ procStringFromCLSID, _ = modole32.FindProc("StringFromCLSID")
+ procStringFromIID, _ = modole32.FindProc("StringFromIID")
+ procIIDFromString, _ = modole32.FindProc("IIDFromString")
+ procGetUserDefaultLCID, _ = modkernel32.FindProc("GetUserDefaultLCID")
+ procCopyMemory, _ = modkernel32.FindProc("RtlMoveMemory")
+ procVariantInit, _ = modoleaut32.FindProc("VariantInit")
+ procVariantClear, _ = modoleaut32.FindProc("VariantClear")
+ procVariantTimeToSystemTime, _ = modoleaut32.FindProc("VariantTimeToSystemTime")
+ procSysAllocString, _ = modoleaut32.FindProc("SysAllocString")
+ procSysAllocStringLen, _ = modoleaut32.FindProc("SysAllocStringLen")
+ procSysFreeString, _ = modoleaut32.FindProc("SysFreeString")
+ procSysStringLen, _ = modoleaut32.FindProc("SysStringLen")
+ procCreateDispTypeInfo, _ = modoleaut32.FindProc("CreateDispTypeInfo")
+ procCreateStdDispatch, _ = modoleaut32.FindProc("CreateStdDispatch")
+ procGetActiveObject, _ = modoleaut32.FindProc("GetActiveObject")
+
+ procGetMessageW, _ = moduser32.FindProc("GetMessageW")
+ procDispatchMessageW, _ = moduser32.FindProc("DispatchMessageW")
+)
+
+// coInitialize initializes COM library on current thread.
+//
+// MSDN documentation suggests that this function should not be called. Call
+// CoInitializeEx() instead. The reason has to do with threading and this
+// function is only for single-threaded apartments.
+//
+// That said, most users of the library have gotten away with just this
+// function. If you are experiencing threading issues, then use
+// CoInitializeEx().
+func coInitialize() (err error) {
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/ms678543(v=vs.85).aspx
+ // Suggests that no value should be passed to CoInitialize.
+ // Could just be Call() since the parameter is optional. <-- Needs testing to be sure.
+ hr, _, _ := procCoInitialize.Call(uintptr(0))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// coInitializeEx initializes COM library with concurrency model.
+func coInitializeEx(coinit uint32) (err error) {
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/ms695279(v=vs.85).aspx
+ // Suggests that the first parameter is not only optional but should always be NULL.
+ hr, _, _ := procCoInitializeEx.Call(uintptr(0), uintptr(coinit))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// CoInitialize initializes COM library on current thread.
+//
+// MSDN documentation suggests that this function should not be called. Call
+// CoInitializeEx() instead. The reason has to do with threading and this
+// function is only for single-threaded apartments.
+//
+// That said, most users of the library have gotten away with just this
+// function. If you are experiencing threading issues, then use
+// CoInitializeEx().
+func CoInitialize(p uintptr) (err error) {
+ // p is ignored and won't be used.
+ // Avoid any variable not used errors.
+ p = uintptr(0)
+ return coInitialize()
+}
+
+// CoInitializeEx initializes COM library with concurrency model.
+func CoInitializeEx(p uintptr, coinit uint32) (err error) {
+ // Avoid any variable not used errors.
+ p = uintptr(0)
+ return coInitializeEx(coinit)
+}
+
+// CoUninitialize uninitializes COM Library.
+func CoUninitialize() {
+ procCoUninitialize.Call()
+}
+
+// CoTaskMemFree frees memory pointer.
+func CoTaskMemFree(memptr uintptr) {
+ procCoTaskMemFree.Call(memptr)
+}
+
+// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier.
+//
+// The Programmatic Identifier must be registered, because it will be looked up
+// in the Windows Registry. The registry entry has the following keys: CLSID,
+// Insertable, Protocol and Shell
+// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx).
+//
+// programID identifies the class id with less precision and is not guaranteed
+// to be unique. These are usually found in the registry under
+// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of
+// "Program.Component.Version" with version being optional.
+//
+// CLSIDFromProgID in Windows API.
+func CLSIDFromProgID(progId string) (clsid *GUID, err error) {
+ var guid GUID
+ lpszProgID := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId)))
+ hr, _, _ := procCLSIDFromProgID.Call(lpszProgID, uintptr(unsafe.Pointer(&guid)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ clsid = &guid
+ return
+}
+
+// CLSIDFromString retrieves Class ID from string representation.
+//
+// This is technically the string version of the GUID and will convert the
+// string to object.
+//
+// CLSIDFromString in Windows API.
+func CLSIDFromString(str string) (clsid *GUID, err error) {
+ var guid GUID
+ lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(str)))
+ hr, _, _ := procCLSIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ clsid = &guid
+ return
+}
+
+// StringFromCLSID returns a GUID-formatted string from a GUID object.
+func StringFromCLSID(clsid *GUID) (str string, err error) {
+ var p *uint16
+ hr, _, _ := procStringFromCLSID.Call(uintptr(unsafe.Pointer(clsid)), uintptr(unsafe.Pointer(&p)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ str = LpOleStrToString(p)
+ return
+}
+
+// IIDFromString returns GUID from program ID.
+func IIDFromString(progId string) (clsid *GUID, err error) {
+ var guid GUID
+ lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId)))
+ hr, _, _ := procIIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ clsid = &guid
+ return
+}
+
+// StringFromIID returns GUID formatted string from GUID object.
+func StringFromIID(iid *GUID) (str string, err error) {
+ var p *uint16
+ hr, _, _ := procStringFromIID.Call(uintptr(unsafe.Pointer(iid)), uintptr(unsafe.Pointer(&p)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ str = LpOleStrToString(p)
+ return
+}
+
+// CreateInstance of single uninitialized object with GUID.
+func CreateInstance(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
+ if iid == nil {
+ iid = IID_IUnknown
+ }
+ hr, _, _ := procCoCreateInstance.Call(
+ uintptr(unsafe.Pointer(clsid)),
+ 0,
+ CLSCTX_SERVER,
+ uintptr(unsafe.Pointer(iid)),
+ uintptr(unsafe.Pointer(&unk)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
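+
+// Illustrative sketch of the usual create-then-query pattern, using only
+// identifiers defined in this package (error handling abbreviated):
+//
+//    unknown, err := ole.CreateInstance(clsid, ole.IID_IUnknown)
+//    if err != nil {
+//        return err
+//    }
+//    defer unknown.Release()
+//    dispatch, err := unknown.QueryInterface(ole.IID_IDispatch)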
+
+// GetActiveObject retrieves pointer to active object.
+func GetActiveObject(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
+ if iid == nil {
+ iid = IID_IUnknown
+ }
+ hr, _, _ := procGetActiveObject.Call(
+ uintptr(unsafe.Pointer(clsid)),
+ uintptr(unsafe.Pointer(iid)),
+ uintptr(unsafe.Pointer(&unk)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// VariantInit initializes variant.
+func VariantInit(v *VARIANT) (err error) {
+ hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// VariantClear clears the value in the VARIANT, setting it to VT_EMPTY.
+func VariantClear(v *VARIANT) (err error) {
+ hr, _, _ := procVariantClear.Call(uintptr(unsafe.Pointer(v)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// SysAllocString allocates memory for string and copies string into memory.
+func SysAllocString(v string) (ss *int16) {
+ pss, _, _ := procSysAllocString.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(v))))
+ ss = (*int16)(unsafe.Pointer(pss))
+ return
+}
+
+// SysAllocStringLen copies up to length of given string returning pointer.
+func SysAllocStringLen(v string) (ss *int16) {
+ u16 := utf16.Encode([]rune(v + "\x00"))
+ ptr := &u16[0]
+
+ pss, _, _ := procSysAllocStringLen.Call(uintptr(unsafe.Pointer(ptr)), uintptr(len(u16)-1))
+ ss = (*int16)(unsafe.Pointer(pss))
+ return
+}
+
+// SysFreeString frees string system memory. This must be called for strings allocated with SysAllocString.
+func SysFreeString(v *int16) (err error) {
+ hr, _, _ := procSysFreeString.Call(uintptr(unsafe.Pointer(v)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// SysStringLen is the length of the system allocated string.
+func SysStringLen(v *int16) uint32 {
+ l, _, _ := procSysStringLen.Call(uintptr(unsafe.Pointer(v)))
+ return uint32(l)
+}
+
+// CreateStdDispatch provides default IDispatch implementation for IUnknown.
+//
+// This handles default IDispatch implementation for objects. It has a few
+// limitations, such as only supporting one language. It will also only return
+// default exception codes.
+func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (disp *IDispatch, err error) {
+ hr, _, _ := procCreateStdDispatch.Call(
+ uintptr(unsafe.Pointer(unk)),
+ v,
+ uintptr(unsafe.Pointer(ptinfo)),
+ uintptr(unsafe.Pointer(&disp)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch.
+//
+// This will not handle the full implementation of the interface.
+func CreateDispTypeInfo(idata *INTERFACEDATA) (pptinfo *IUnknown, err error) {
+ hr, _, _ := procCreateDispTypeInfo.Call(
+ uintptr(unsafe.Pointer(idata)),
+ uintptr(GetUserDefaultLCID()),
+ uintptr(unsafe.Pointer(&pptinfo)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// copyMemory copies a block of memory from src to dest.
+func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {
+ procCopyMemory.Call(uintptr(dest), uintptr(src), uintptr(length))
+}
+
+// GetUserDefaultLCID retrieves current user default locale.
+func GetUserDefaultLCID() (lcid uint32) {
+ ret, _, _ := procGetUserDefaultLCID.Call()
+ lcid = uint32(ret)
+ return
+}
+
+// GetMessage retrieves a message from the message queue.
+//
+// This function appears to block. PeekMessage does not block.
+func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (ret int32, err error) {
+ r0, _, err := procGetMessageW.Call(uintptr(unsafe.Pointer(msg)), uintptr(hwnd), uintptr(MsgFilterMin), uintptr(MsgFilterMax))
+ ret = int32(r0)
+ return
+}
+
+// DispatchMessage to window procedure.
+func DispatchMessage(msg *Msg) (ret int32) {
+ r0, _, _ := procDispatchMessageW.Call(uintptr(unsafe.Pointer(msg)))
+ ret = int32(r0)
+ return
+}
+
+// GetVariantDate converts COM Variant Time value to Go time.Time.
+func GetVariantDate(value float64) (time.Time, error) {
+ var st syscall.Systemtime
+ r, _, _ := procVariantTimeToSystemTime.Call(uintptr(unsafe.Pointer(&value)), uintptr(unsafe.Pointer(&st)))
+ if r != 0 {
+ return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds)*1000000, time.UTC), nil
+ }
+ return time.Now(), errors.New("could not convert to time, passing current time")
+}
diff --git a/vendor/github.com/go-ole/go-ole/com_func.go b/vendor/github.com/go-ole/go-ole/com_func.go
new file mode 100644
index 000000000..425aad323
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/com_func.go
@@ -0,0 +1,174 @@
+// +build !windows
+
+package ole
+
+import (
+ "time"
+ "unsafe"
+)
+
+// coInitialize initializes COM library on current thread.
+//
+// MSDN documentation suggests that this function should not be called. Call
+// CoInitializeEx() instead. The reason has to do with threading and this
+// function is only for single-threaded apartments.
+//
+// That said, most users of the library have gotten away with just this
+// function. If you are experiencing threading issues, then use
+// CoInitializeEx().
+func coInitialize() error {
+ return NewError(E_NOTIMPL)
+}
+
+// coInitializeEx initializes COM library with concurrency model.
+func coInitializeEx(coinit uint32) error {
+ return NewError(E_NOTIMPL)
+}
+
+// CoInitialize initializes COM library on current thread.
+//
+// MSDN documentation suggests that this function should not be called. Call
+// CoInitializeEx() instead. The reason has to do with threading and this
+// function is only for single-threaded apartments.
+//
+// That said, most users of the library have gotten away with just this
+// function. If you are experiencing threading issues, then use
+// CoInitializeEx().
+func CoInitialize(p uintptr) error {
+ return NewError(E_NOTIMPL)
+}
+
+// CoInitializeEx initializes COM library with concurrency model.
+func CoInitializeEx(p uintptr, coinit uint32) error {
+ return NewError(E_NOTIMPL)
+}
+
+// CoUninitialize uninitializes COM Library.
+func CoUninitialize() {}
+
+// CoTaskMemFree frees memory pointer.
+func CoTaskMemFree(memptr uintptr) {}
+
+// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier.
+//
+// The Programmatic Identifier must be registered, because it will be looked up
+// in the Windows Registry. The registry entry has the following keys: CLSID,
+// Insertable, Protocol and Shell
+// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx).
+//
+// programID identifies the class id with less precision and is not guaranteed
+// to be unique. These are usually found in the registry under
+// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of
+// "Program.Component.Version" with version being optional.
+//
+// CLSIDFromProgID in Windows API.
+func CLSIDFromProgID(progId string) (*GUID, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// CLSIDFromString retrieves Class ID from string representation.
+//
+// This is technically the string version of the GUID and will convert the
+// string to object.
+//
+// CLSIDFromString in Windows API.
+func CLSIDFromString(str string) (*GUID, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// StringFromCLSID returns GUID formatted string from GUID object.
+func StringFromCLSID(clsid *GUID) (string, error) {
+ return "", NewError(E_NOTIMPL)
+}
+
+// IIDFromString returns GUID from the string representation of an interface ID.
+func IIDFromString(progId string) (*GUID, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// StringFromIID returns GUID formatted string from GUID object.
+func StringFromIID(iid *GUID) (string, error) {
+ return "", NewError(E_NOTIMPL)
+}
+
+// CreateInstance of single uninitialized object with GUID.
+func CreateInstance(clsid *GUID, iid *GUID) (*IUnknown, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// GetActiveObject retrieves pointer to active object.
+func GetActiveObject(clsid *GUID, iid *GUID) (*IUnknown, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// VariantInit initializes variant.
+func VariantInit(v *VARIANT) error {
+ return NewError(E_NOTIMPL)
+}
+
+// VariantClear clears the value in the VARIANT, setting it to VT_EMPTY.
+func VariantClear(v *VARIANT) error {
+ return NewError(E_NOTIMPL)
+}
+
+// SysAllocString allocates memory for string and copies string into memory.
+func SysAllocString(v string) *int16 {
+ u := int16(0)
+ return &u
+}
+
+// SysAllocStringLen copies up to length of given string returning pointer.
+func SysAllocStringLen(v string) *int16 {
+ u := int16(0)
+ return &u
+}
+
+// SysFreeString frees string system memory. This must be called for strings allocated with SysAllocString.
+func SysFreeString(v *int16) error {
+ return NewError(E_NOTIMPL)
+}
+
+// SysStringLen is the length of the system allocated string.
+func SysStringLen(v *int16) uint32 {
+ return uint32(0)
+}
+
+// CreateStdDispatch provides default IDispatch implementation for IUnknown.
+//
+// This handles default IDispatch implementation for objects. It has a few
+// limitations, such as only supporting one language. It will also only return
+// default exception codes.
+func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (*IDispatch, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch.
+//
+// This will not handle the full implementation of the interface.
+func CreateDispTypeInfo(idata *INTERFACEDATA) (*IUnknown, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// copyMemory copies a block of memory from src to dest.
+func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {}
+
+// GetUserDefaultLCID retrieves current user default locale.
+func GetUserDefaultLCID() uint32 {
+ return uint32(0)
+}
+
+// GetMessage retrieves a message from the message queue.
+//
+// This function appears to block. PeekMessage does not block.
+func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (int32, error) {
+ return int32(0), NewError(E_NOTIMPL)
+}
+
+// DispatchMessage to window procedure.
+func DispatchMessage(msg *Msg) int32 {
+ return int32(0)
+}
+
+// GetVariantDate converts COM Variant Time value to Go time.Time.
+func GetVariantDate(value float64) (time.Time, error) {
+ return time.Now(), NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/connect.go b/vendor/github.com/go-ole/go-ole/connect.go
new file mode 100644
index 000000000..b2ac2ec67
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/connect.go
@@ -0,0 +1,192 @@
+package ole
+
+// Connection contains IUnknown for fluent interface interaction.
+//
+// Deprecated: Use the oleutil package instead.
+type Connection struct {
+ Object *IUnknown // Access COM
+}
+
+// Initialize COM.
+func (*Connection) Initialize() (err error) {
+ return coInitialize()
+}
+
+// Uninitialize COM.
+func (*Connection) Uninitialize() {
+ CoUninitialize()
+}
+
+// Create IUnknown object, resolving the name first as a ProgID and then as a GUID string.
+func (c *Connection) Create(progId string) (err error) {
+ var clsid *GUID
+ clsid, err = CLSIDFromProgID(progId)
+ if err != nil {
+ clsid, err = CLSIDFromString(progId)
+ if err != nil {
+ return
+ }
+ }
+
+ unknown, err := CreateInstance(clsid, IID_IUnknown)
+ if err != nil {
+ return
+ }
+ c.Object = unknown
+
+ return
+}
+
+// Release IUnknown object.
+func (c *Connection) Release() {
+ c.Object.Release()
+}
+
+// Load COM object from list of programIDs or strings.
+func (c *Connection) Load(names ...string) (errors []error) {
+ errors = make([]error, 0, len(names))
+ for _, name := range names {
+ err := c.Create(name)
+ if err != nil {
+ errors = append(errors, err)
+ continue
+ }
+ break
+ }
+
+ return
+}
+
+// Dispatch returns Dispatch object.
+func (c *Connection) Dispatch() (object *Dispatch, err error) {
+ dispatch, err := c.Object.QueryInterface(IID_IDispatch)
+ if err != nil {
+ return
+ }
+ object = &Dispatch{dispatch}
+ return
+}
+
+// Dispatch stores IDispatch object.
+type Dispatch struct {
+ Object *IDispatch // Dispatch object.
+}
+
+// Call method on IDispatch with parameters.
+func (d *Dispatch) Call(method string, params ...interface{}) (result *VARIANT, err error) {
+ id, err := d.GetId(method)
+ if err != nil {
+ return
+ }
+
+ result, err = d.Invoke(id, DISPATCH_METHOD, params)
+ return
+}
+
+// MustCall method on IDispatch with parameters.
+func (d *Dispatch) MustCall(method string, params ...interface{}) (result *VARIANT) {
+ id, err := d.GetId(method)
+ if err != nil {
+ panic(err)
+ }
+
+ result, err = d.Invoke(id, DISPATCH_METHOD, params)
+ if err != nil {
+ panic(err)
+ }
+
+ return
+}
+
+// Get property on IDispatch with parameters.
+func (d *Dispatch) Get(name string, params ...interface{}) (result *VARIANT, err error) {
+ id, err := d.GetId(name)
+ if err != nil {
+ return
+ }
+ result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params)
+ return
+}
+
+// MustGet property on IDispatch with parameters.
+func (d *Dispatch) MustGet(name string, params ...interface{}) (result *VARIANT) {
+ id, err := d.GetId(name)
+ if err != nil {
+ panic(err)
+ }
+
+ result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params)
+ if err != nil {
+ panic(err)
+ }
+ return
+}
+
+// Set property on IDispatch with parameters.
+func (d *Dispatch) Set(name string, params ...interface{}) (result *VARIANT, err error) {
+ id, err := d.GetId(name)
+ if err != nil {
+ return
+ }
+ result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params)
+ return
+}
+
+// MustSet property on IDispatch with parameters.
+func (d *Dispatch) MustSet(name string, params ...interface{}) (result *VARIANT) {
+ id, err := d.GetId(name)
+ if err != nil {
+ panic(err)
+ }
+
+ result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params)
+ if err != nil {
+ panic(err)
+ }
+ return
+}
+
+// GetId retrieves ID of name on IDispatch.
+func (d *Dispatch) GetId(name string) (id int32, err error) {
+ var dispid []int32
+ dispid, err = d.Object.GetIDsOfName([]string{name})
+ if err != nil {
+ return
+ }
+ id = dispid[0]
+ return
+}
+
+// GetIds retrieves all IDs of names on IDispatch.
+func (d *Dispatch) GetIds(names ...string) (dispid []int32, err error) {
+ dispid, err = d.Object.GetIDsOfName(names)
+ return
+}
+
+// Invoke IDispatch on DisplayID of dispatch type with parameters.
+//
+// There have been problems where forwarding cascading params... directly would
+// error out because the parameters would be empty.
+func (d *Dispatch) Invoke(id int32, dispatch int16, params []interface{}) (result *VARIANT, err error) {
+ if len(params) < 1 {
+ result, err = d.Object.Invoke(id, dispatch)
+ } else {
+ result, err = d.Object.Invoke(id, dispatch, params...)
+ }
+ return
+}
+
+// Release IDispatch object.
+func (d *Dispatch) Release() {
+ d.Object.Release()
+}
+
+// Connect initializes COM and attempts to load IUnknown based on given names.
+func Connect(names ...string) (connection *Connection) {
+ connection = &Connection{}
+ connection.Initialize()
+ connection.Load(names...)
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/constants.go b/vendor/github.com/go-ole/go-ole/constants.go
new file mode 100644
index 000000000..fd0c6d74b
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/constants.go
@@ -0,0 +1,153 @@
+package ole
+
+const (
+ CLSCTX_INPROC_SERVER = 1
+ CLSCTX_INPROC_HANDLER = 2
+ CLSCTX_LOCAL_SERVER = 4
+ CLSCTX_INPROC_SERVER16 = 8
+ CLSCTX_REMOTE_SERVER = 16
+ CLSCTX_ALL = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER | CLSCTX_LOCAL_SERVER
+ CLSCTX_INPROC = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER
+ CLSCTX_SERVER = CLSCTX_INPROC_SERVER | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER
+)
+
+const (
+ COINIT_APARTMENTTHREADED = 0x2
+ COINIT_MULTITHREADED = 0x0
+ COINIT_DISABLE_OLE1DDE = 0x4
+ COINIT_SPEED_OVER_MEMORY = 0x8
+)
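+
+// For illustration, these flags are the second argument to CoInitializeEx,
+// e.g. ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) for a multi-threaded
+// apartment.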
+
+const (
+ DISPATCH_METHOD = 1
+ DISPATCH_PROPERTYGET = 2
+ DISPATCH_PROPERTYPUT = 4
+ DISPATCH_PROPERTYPUTREF = 8
+)
+
+const (
+ S_OK = 0x00000000
+ E_UNEXPECTED = 0x8000FFFF
+ E_NOTIMPL = 0x80004001
+ E_OUTOFMEMORY = 0x8007000E
+ E_INVALIDARG = 0x80070057
+ E_NOINTERFACE = 0x80004002
+ E_POINTER = 0x80004003
+ E_HANDLE = 0x80070006
+ E_ABORT = 0x80004004
+ E_FAIL = 0x80004005
+ E_ACCESSDENIED = 0x80070005
+ E_PENDING = 0x8000000A
+
+ CO_E_CLASSSTRING = 0x800401F3
+)
+
+const (
+ CC_FASTCALL = iota
+ CC_CDECL
+ CC_MSCPASCAL
+ CC_PASCAL = CC_MSCPASCAL
+ CC_MACPASCAL
+ CC_STDCALL
+ CC_FPFASTCALL
+ CC_SYSCALL
+ CC_MPWCDECL
+ CC_MPWPASCAL
+ CC_MAX = CC_MPWPASCAL
+)
+
+type VT uint16
+
+const (
+ VT_EMPTY VT = 0x0
+ VT_NULL VT = 0x1
+ VT_I2 VT = 0x2
+ VT_I4 VT = 0x3
+ VT_R4 VT = 0x4
+ VT_R8 VT = 0x5
+ VT_CY VT = 0x6
+ VT_DATE VT = 0x7
+ VT_BSTR VT = 0x8
+ VT_DISPATCH VT = 0x9
+ VT_ERROR VT = 0xa
+ VT_BOOL VT = 0xb
+ VT_VARIANT VT = 0xc
+ VT_UNKNOWN VT = 0xd
+ VT_DECIMAL VT = 0xe
+ VT_I1 VT = 0x10
+ VT_UI1 VT = 0x11
+ VT_UI2 VT = 0x12
+ VT_UI4 VT = 0x13
+ VT_I8 VT = 0x14
+ VT_UI8 VT = 0x15
+ VT_INT VT = 0x16
+ VT_UINT VT = 0x17
+ VT_VOID VT = 0x18
+ VT_HRESULT VT = 0x19
+ VT_PTR VT = 0x1a
+ VT_SAFEARRAY VT = 0x1b
+ VT_CARRAY VT = 0x1c
+ VT_USERDEFINED VT = 0x1d
+ VT_LPSTR VT = 0x1e
+ VT_LPWSTR VT = 0x1f
+ VT_RECORD VT = 0x24
+ VT_INT_PTR VT = 0x25
+ VT_UINT_PTR VT = 0x26
+ VT_FILETIME VT = 0x40
+ VT_BLOB VT = 0x41
+ VT_STREAM VT = 0x42
+ VT_STORAGE VT = 0x43
+ VT_STREAMED_OBJECT VT = 0x44
+ VT_STORED_OBJECT VT = 0x45
+ VT_BLOB_OBJECT VT = 0x46
+ VT_CF VT = 0x47
+ VT_CLSID VT = 0x48
+ VT_BSTR_BLOB VT = 0xfff
+ VT_VECTOR VT = 0x1000
+ VT_ARRAY VT = 0x2000
+ VT_BYREF VT = 0x4000
+ VT_RESERVED VT = 0x8000
+ VT_ILLEGAL VT = 0xffff
+ VT_ILLEGALMASKED VT = 0xfff
+ VT_TYPEMASK VT = 0xfff
+)
+
+const (
+ DISPID_UNKNOWN = -1
+ DISPID_VALUE = 0
+ DISPID_PROPERTYPUT = -3
+ DISPID_NEWENUM = -4
+ DISPID_EVALUATE = -5
+ DISPID_CONSTRUCTOR = -6
+ DISPID_DESTRUCTOR = -7
+ DISPID_COLLECT = -8
+)
+
+const (
+ TKIND_ENUM = 1
+ TKIND_RECORD = 2
+ TKIND_MODULE = 3
+ TKIND_INTERFACE = 4
+ TKIND_DISPATCH = 5
+ TKIND_COCLASS = 6
+ TKIND_ALIAS = 7
+ TKIND_UNION = 8
+ TKIND_MAX = 9
+)
+
+// Safe Array Feature Flags
+
+const (
+ FADF_AUTO = 0x0001
+ FADF_STATIC = 0x0002
+ FADF_EMBEDDED = 0x0004
+ FADF_FIXEDSIZE = 0x0010
+ FADF_RECORD = 0x0020
+ FADF_HAVEIID = 0x0040
+ FADF_HAVEVARTYPE = 0x0080
+ FADF_BSTR = 0x0100
+ FADF_UNKNOWN = 0x0200
+ FADF_DISPATCH = 0x0400
+ FADF_VARIANT = 0x0800
+ FADF_RESERVED = 0xF008
+)
diff --git a/vendor/github.com/go-ole/go-ole/error.go b/vendor/github.com/go-ole/go-ole/error.go
new file mode 100644
index 000000000..096b456d3
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/error.go
@@ -0,0 +1,51 @@
+package ole
+
+// OleError stores COM errors.
+type OleError struct {
+ hr uintptr
+ description string
+ subError error
+}
+
+// NewError creates new error with HResult.
+func NewError(hr uintptr) *OleError {
+ return &OleError{hr: hr}
+}
+
+// NewErrorWithDescription creates new COM error with HResult and description.
+func NewErrorWithDescription(hr uintptr, description string) *OleError {
+ return &OleError{hr: hr, description: description}
+}
+
+// NewErrorWithSubError creates new COM error with parent error.
+func NewErrorWithSubError(hr uintptr, description string, err error) *OleError {
+ return &OleError{hr: hr, description: description, subError: err}
+}
+
+// Code is the HResult.
+func (v *OleError) Code() uintptr {
+ return uintptr(v.hr)
+}
+
+// String returns the description if one was set, otherwise the message formatted from the error code.
+func (v *OleError) String() string {
+ if v.description != "" {
+ return errstr(int(v.hr)) + " (" + v.description + ")"
+ }
+ return errstr(int(v.hr))
+}
+
+// Error implements error interface.
+func (v *OleError) Error() string {
+ return v.String()
+}
+
+// Description retrieves error summary, if there is one.
+func (v *OleError) Description() string {
+ return v.description
+}
+
+// SubError returns parent error, if there is one.
+func (v *OleError) SubError() error {
+ return v.subError
+}
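+
+// Illustrative sketch: callers can recover the raw HRESULT from a returned
+// error with a type assertion, e.g.
+//
+//    if oleErr, ok := err.(*ole.OleError); ok && oleErr.Code() == ole.E_NOTIMPL {
+//        // feature not implemented on this platform
+//    }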
diff --git a/vendor/github.com/go-ole/go-ole/error_func.go b/vendor/github.com/go-ole/go-ole/error_func.go
new file mode 100644
index 000000000..8a2ffaa27
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/error_func.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package ole
+
+// errstr converts error code to string.
+func errstr(errno int) string {
+ return ""
+}
diff --git a/vendor/github.com/go-ole/go-ole/error_windows.go b/vendor/github.com/go-ole/go-ole/error_windows.go
new file mode 100644
index 000000000..d0e8e6859
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/error_windows.go
@@ -0,0 +1,24 @@
+// +build windows
+
+package ole
+
+import (
+ "fmt"
+ "syscall"
+ "unicode/utf16"
+)
+
+// errstr converts error code to string.
+func errstr(errno int) string {
+ // ask windows for the remaining errors
+ var flags uint32 = syscall.FORMAT_MESSAGE_FROM_SYSTEM | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS
+ b := make([]uint16, 300)
+ n, err := syscall.FormatMessage(flags, 0, uint32(errno), 0, b, nil)
+ if err != nil {
+ return fmt.Sprintf("error %d (FormatMessage failed with: %v)", errno, err)
+ }
+ // trim terminating \r and \n
+ for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- {
+ }
+ return string(utf16.Decode(b[:n]))
+}
diff --git a/vendor/github.com/go-ole/go-ole/guid.go b/vendor/github.com/go-ole/go-ole/guid.go
new file mode 100644
index 000000000..609ef0bfe
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/guid.go
@@ -0,0 +1,118 @@
+package ole
+
+var (
+ // IID_NULL is null Interface ID, used when no other Interface ID is known.
+ IID_NULL = &GUID{0x00000000, 0x0000, 0x0000, [8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}
+
+ // IID_IUnknown is for IUnknown interfaces.
+ IID_IUnknown = &GUID{0x00000000, 0x0000, 0x0000, [8]byte{0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}}
+
+ // IID_IDispatch is for IDispatch interfaces.
+ IID_IDispatch = &GUID{0x00020400, 0x0000, 0x0000, [8]byte{0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}}
+
+ // IID_IEnumVariant is for IEnumVariant interfaces
+ IID_IEnumVariant = &GUID{0x00020404, 0x0000, 0x0000, [8]byte{0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}}
+
+ // IID_IConnectionPointContainer is for IConnectionPointContainer interfaces.
+ IID_IConnectionPointContainer = &GUID{0xB196B284, 0xBAB4, 0x101A, [8]byte{0xB6, 0x9C, 0x00, 0xAA, 0x00, 0x34, 0x1D, 0x07}}
+
+ // IID_IConnectionPoint is for IConnectionPoint interfaces.
+ IID_IConnectionPoint = &GUID{0xB196B286, 0xBAB4, 0x101A, [8]byte{0xB6, 0x9C, 0x00, 0xAA, 0x00, 0x34, 0x1D, 0x07}}
+
+ // IID_IInspectable is for IInspectable interfaces.
+ IID_IInspectable = &GUID{0xaf86e2e0, 0xb12d, 0x4c6a, [8]byte{0x9c, 0x5a, 0xd7, 0xaa, 0x65, 0x10, 0x1e, 0x90}}
+
+ // IID_IProvideClassInfo is for IProvideClassInfo interfaces.
+ IID_IProvideClassInfo = &GUID{0xb196b283, 0xbab4, 0x101a, [8]byte{0xB6, 0x9C, 0x00, 0xAA, 0x00, 0x34, 0x1D, 0x07}}
+)
+
+// These are for testing and not part of any library.
+var (
+ // IID_ICOMTestString is for ICOMTestString interfaces.
+ //
+ // {E0133EB4-C36F-469A-9D3D-C66B84BE19ED}
+ IID_ICOMTestString = &GUID{0xe0133eb4, 0xc36f, 0x469a, [8]byte{0x9d, 0x3d, 0xc6, 0x6b, 0x84, 0xbe, 0x19, 0xed}}
+
+ // IID_ICOMTestInt8 is for ICOMTestInt8 interfaces.
+ //
+ // {BEB06610-EB84-4155-AF58-E2BFF53608B4}
+ IID_ICOMTestInt8 = &GUID{0xbeb06610, 0xeb84, 0x4155, [8]byte{0xaf, 0x58, 0xe2, 0xbf, 0xf5, 0x36, 0x80, 0xb4}}
+
+ // IID_ICOMTestInt16 is for ICOMTestInt16 interfaces.
+ //
+ // {DAA3F9FA-761E-4976-A860-8364CE55F6FC}
+ IID_ICOMTestInt16 = &GUID{0xdaa3f9fa, 0x761e, 0x4976, [8]byte{0xa8, 0x60, 0x83, 0x64, 0xce, 0x55, 0xf6, 0xfc}}
+
+ // IID_ICOMTestInt32 is for ICOMTestInt32 interfaces.
+ //
+ // {E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0}
+ IID_ICOMTestInt32 = &GUID{0xe3dedee7, 0x38a2, 0x4540, [8]byte{0x91, 0xd1, 0x2e, 0xef, 0x1d, 0x88, 0x91, 0xb0}}
+
+ // IID_ICOMTestInt64 is for ICOMTestInt64 interfaces.
+ //
+ // {8D437CBC-B3ED-485C-BC32-C336432A1623}
+ IID_ICOMTestInt64 = &GUID{0x8d437cbc, 0xb3ed, 0x485c, [8]byte{0xbc, 0x32, 0xc3, 0x36, 0x43, 0x2a, 0x16, 0x23}}
+
+ // IID_ICOMTestFloat is for ICOMTestFloat interfaces.
+ //
+ // {BF1ED004-EA02-456A-AA55-2AC8AC6B054C}
+ IID_ICOMTestFloat = &GUID{0xbf1ed004, 0xea02, 0x456a, [8]byte{0xaa, 0x55, 0x2a, 0xc8, 0xac, 0x6b, 0x5, 0x4c}}
+
+ // IID_ICOMTestDouble is for ICOMTestDouble interfaces.
+ //
+ // {BF908A81-8687-4E93-999F-D86FAB284BA0}
+ IID_ICOMTestDouble = &GUID{0xbf908a81, 0x8687, 0x4e93, [8]byte{0x99, 0x9f, 0xd8, 0x6f, 0xab, 0x28, 0x4b, 0xa0}}
+
+ // IID_ICOMTestBoolean is for ICOMTestBoolean interfaces.
+ //
+ // {D530E7A6-4EE8-40D1-8931-3D63B8605001}
+ IID_ICOMTestBoolean = &GUID{0xd530e7a6, 0x4ee8, 0x40d1, [8]byte{0x89, 0x31, 0x3d, 0x63, 0xb8, 0x60, 0x50, 0x10}}
+
+ // IID_ICOMEchoTestObject is for ICOMEchoTestObject interfaces.
+ //
+ // {6485B1EF-D780-4834-A4FE-1EBB51746CA3}
+ IID_ICOMEchoTestObject = &GUID{0x6485b1ef, 0xd780, 0x4834, [8]byte{0xa4, 0xfe, 0x1e, 0xbb, 0x51, 0x74, 0x6c, 0xa3}}
+
+ // IID_ICOMTestTypes is for ICOMTestTypes interfaces.
+ //
+ // {CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0}
+ IID_ICOMTestTypes = &GUID{0xcca8d7ae, 0x91c0, 0x4277, [8]byte{0xa8, 0xb3, 0xff, 0x4e, 0xdf, 0x28, 0xd3, 0xc0}}
+
+ // CLSID_COMEchoTestObject is for COMEchoTestObject class.
+ //
+ // {3C24506A-AE9E-4D50-9157-EF317281F1B0}
+ CLSID_COMEchoTestObject = &GUID{0x3c24506a, 0xae9e, 0x4d50, [8]byte{0x91, 0x57, 0xef, 0x31, 0x72, 0x81, 0xf1, 0xb0}}
+
+ // CLSID_COMTestScalarClass is for COMTestScalarClass class.
+ //
+ // {865B85C5-0334-4AC6-9EF6-AACEC8FC5E86}
+ CLSID_COMTestScalarClass = &GUID{0x865b85c5, 0x0334, 0x4ac6, [8]byte{0x9e, 0xf6, 0xaa, 0xce, 0xc8, 0xfc, 0x5e, 0x86}}
+)
+
+// GUID is Windows API specific GUID type.
+//
+// This exists to match Windows GUID type for direct passing for COM.
+// Format is xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
+type GUID struct {
+ Data1 uint32
+ Data2 uint16
+ Data3 uint16
+ Data4 [8]byte
+}
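+
+// For illustration, the GUID {00020400-0000-0000-C000-000000000046}
+// (IID_IDispatch above) is written as:
+//
+//    GUID{0x00020400, 0x0000, 0x0000, [8]byte{0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}}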
+
+// IsEqualGUID compares two GUID.
+//
+// Not constant time comparison.
+func IsEqualGUID(guid1 *GUID, guid2 *GUID) bool {
+ return guid1.Data1 == guid2.Data1 &&
+ guid1.Data2 == guid2.Data2 &&
+ guid1.Data3 == guid2.Data3 &&
+ guid1.Data4[0] == guid2.Data4[0] &&
+ guid1.Data4[1] == guid2.Data4[1] &&
+ guid1.Data4[2] == guid2.Data4[2] &&
+ guid1.Data4[3] == guid2.Data4[3] &&
+ guid1.Data4[4] == guid2.Data4[4] &&
+ guid1.Data4[5] == guid2.Data4[5] &&
+ guid1.Data4[6] == guid2.Data4[6] &&
+ guid1.Data4[7] == guid2.Data4[7]
+}
diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go
new file mode 100644
index 000000000..9e6c49f41
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go
@@ -0,0 +1,20 @@
+package ole
+
+import "unsafe"
+
+type IConnectionPoint struct {
+ IUnknown
+}
+
+type IConnectionPointVtbl struct {
+ IUnknownVtbl
+ GetConnectionInterface uintptr
+ GetConnectionPointContainer uintptr
+ Advise uintptr
+ Unadvise uintptr
+ EnumConnections uintptr
+}
+
+func (v *IConnectionPoint) VTable() *IConnectionPointVtbl {
+ return (*IConnectionPointVtbl)(unsafe.Pointer(v.RawVTable))
+}
diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go
new file mode 100644
index 000000000..5414dc3cd
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go
@@ -0,0 +1,21 @@
+// +build !windows
+
+package ole
+
+import "unsafe"
+
+func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 {
+ return int32(0)
+}
+
+func (v *IConnectionPoint) Advise(unknown *IUnknown) (uint32, error) {
+ return uint32(0), NewError(E_NOTIMPL)
+}
+
+func (v *IConnectionPoint) Unadvise(cookie uint32) error {
+ return NewError(E_NOTIMPL)
+}
+
+func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) (err error) {
+ return NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go
new file mode 100644
index 000000000..32bc18324
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go
@@ -0,0 +1,43 @@
+// +build windows
+
+package ole
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 {
+ // XXX: This doesn't look like it does what it's supposed to
+ return release((*IUnknown)(unsafe.Pointer(v)))
+}
+
+func (v *IConnectionPoint) Advise(unknown *IUnknown) (cookie uint32, err error) {
+ hr, _, _ := syscall.Syscall(
+ v.VTable().Advise,
+ 3,
+ uintptr(unsafe.Pointer(v)),
+ uintptr(unsafe.Pointer(unknown)),
+ uintptr(unsafe.Pointer(&cookie)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func (v *IConnectionPoint) Unadvise(cookie uint32) (err error) {
+ hr, _, _ := syscall.Syscall(
+ v.VTable().Unadvise,
+ 2,
+ uintptr(unsafe.Pointer(v)),
+ uintptr(cookie),
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) error {
+ return NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go
new file mode 100644
index 000000000..165860d19
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go
@@ -0,0 +1,17 @@
+package ole
+
+import "unsafe"
+
+type IConnectionPointContainer struct {
+ IUnknown
+}
+
+type IConnectionPointContainerVtbl struct {
+ IUnknownVtbl
+ EnumConnectionPoints uintptr
+ FindConnectionPoint uintptr
+}
+
+func (v *IConnectionPointContainer) VTable() *IConnectionPointContainerVtbl {
+ return (*IConnectionPointContainerVtbl)(unsafe.Pointer(v.RawVTable))
+}
diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go
new file mode 100644
index 000000000..5dfa42aae
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package ole
+
+func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error {
+ return NewError(E_NOTIMPL)
+}
+
+func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) error {
+ return NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go
new file mode 100644
index 000000000..ad30d79ef
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go
@@ -0,0 +1,25 @@
+// +build windows
+
+package ole
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error {
+ return NewError(E_NOTIMPL)
+}
+
+func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) (err error) {
+ hr, _, _ := syscall.Syscall(
+ v.VTable().FindConnectionPoint,
+ 3,
+ uintptr(unsafe.Pointer(v)),
+ uintptr(unsafe.Pointer(iid)),
+ uintptr(unsafe.Pointer(point)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/idispatch.go b/vendor/github.com/go-ole/go-ole/idispatch.go
new file mode 100644
index 000000000..d4af12409
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/idispatch.go
@@ -0,0 +1,94 @@
+package ole
+
+import "unsafe"
+
+type IDispatch struct {
+ IUnknown
+}
+
+type IDispatchVtbl struct {
+ IUnknownVtbl
+ GetTypeInfoCount uintptr
+ GetTypeInfo uintptr
+ GetIDsOfNames uintptr
+ Invoke uintptr
+}
+
+func (v *IDispatch) VTable() *IDispatchVtbl {
+ return (*IDispatchVtbl)(unsafe.Pointer(v.RawVTable))
+}
+
+func (v *IDispatch) GetIDsOfName(names []string) (dispid []int32, err error) {
+ dispid, err = getIDsOfName(v, names)
+ return
+}
+
+func (v *IDispatch) Invoke(dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) {
+ result, err = invoke(v, dispid, dispatch, params...)
+ return
+}
+
+func (v *IDispatch) GetTypeInfoCount() (c uint32, err error) {
+ c, err = getTypeInfoCount(v)
+ return
+}
+
+func (v *IDispatch) GetTypeInfo() (tinfo *ITypeInfo, err error) {
+ tinfo, err = getTypeInfo(v)
+ return
+}
+
+// GetSingleIDOfName is a helper that returns single display ID for IDispatch name.
+//
+// This replaces the common pattern of attempting to get a single name from the list of available
+// IDs. It gives the first ID, if it is available.
+func (v *IDispatch) GetSingleIDOfName(name string) (displayID int32, err error) {
+ var displayIDs []int32
+ displayIDs, err = v.GetIDsOfName([]string{name})
+ if err != nil {
+ return
+ }
+ displayID = displayIDs[0]
+ return
+}
+
+// InvokeWithOptionalArgs accepts arguments as an array, works like Invoke.
+//
+// Accepts name and will attempt to retrieve Display ID to pass to Invoke.
+//
+// Passing params as an array is a workaround that could be fixed in later versions of Go that
+// prevent passing empty params. During testing it was discovered that this is an acceptable way of
+// getting around not being able to pass params normally.
+func (v *IDispatch) InvokeWithOptionalArgs(name string, dispatch int16, params []interface{}) (result *VARIANT, err error) {
+ displayID, err := v.GetSingleIDOfName(name)
+ if err != nil {
+ return
+ }
+
+ if len(params) < 1 {
+ result, err = v.Invoke(displayID, dispatch)
+ } else {
+ result, err = v.Invoke(displayID, dispatch, params...)
+ }
+
+ return
+}
+
+// CallMethod invokes named function with arguments on object.
+func (v *IDispatch) CallMethod(name string, params ...interface{}) (*VARIANT, error) {
+ return v.InvokeWithOptionalArgs(name, DISPATCH_METHOD, params)
+}
+
+// GetProperty retrieves the property with the name with the ability to pass arguments.
+//
+// Most of the time you will not need to pass arguments as most objects do not allow for this
+// feature. Or at least, should not allow for this feature. Some servers don't follow best practices
+// and this is provided for those edge cases.
+func (v *IDispatch) GetProperty(name string, params ...interface{}) (*VARIANT, error) {
+ return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYGET, params)
+}
+
+// PutProperty attempts to mutate a property in the object.
+func (v *IDispatch) PutProperty(name string, params ...interface{}) (*VARIANT, error) {
+ return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYPUT, params)
+}
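+
+// Illustrative sketch of the helpers above; the method and property names are
+// assumptions for the example only and depend on the COM server in use:
+//
+//    if _, err := dispatch.CallMethod("Refresh"); err != nil {
+//        return err
+//    }
+//    title, err := dispatch.GetProperty("Title")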
diff --git a/vendor/github.com/go-ole/go-ole/idispatch_func.go b/vendor/github.com/go-ole/go-ole/idispatch_func.go
new file mode 100644
index 000000000..b8fbbe319
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/idispatch_func.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package ole
+
+func getIDsOfName(disp *IDispatch, names []string) ([]int32, error) {
+ return []int32{}, NewError(E_NOTIMPL)
+}
+
+func getTypeInfoCount(disp *IDispatch) (uint32, error) {
+ return uint32(0), NewError(E_NOTIMPL)
+}
+
+func getTypeInfo(disp *IDispatch) (*ITypeInfo, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (*VARIANT, error) {
+ return nil, NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/idispatch_windows.go b/vendor/github.com/go-ole/go-ole/idispatch_windows.go
new file mode 100644
index 000000000..10b1ae464
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/idispatch_windows.go
@@ -0,0 +1,196 @@
+// +build windows
+
+package ole
+
+import (
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+func getIDsOfName(disp *IDispatch, names []string) (dispid []int32, err error) {
+ wnames := make([]*uint16, len(names))
+ for i := 0; i < len(names); i++ {
+ wnames[i] = syscall.StringToUTF16Ptr(names[i])
+ }
+ dispid = make([]int32, len(names))
+ namelen := uint32(len(names))
+ hr, _, _ := syscall.Syscall6(
+ disp.VTable().GetIDsOfNames,
+ 6,
+ uintptr(unsafe.Pointer(disp)),
+ uintptr(unsafe.Pointer(IID_NULL)),
+ uintptr(unsafe.Pointer(&wnames[0])),
+ uintptr(namelen),
+ uintptr(GetUserDefaultLCID()),
+ uintptr(unsafe.Pointer(&dispid[0])))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func getTypeInfoCount(disp *IDispatch) (c uint32, err error) {
+ hr, _, _ := syscall.Syscall(
+ disp.VTable().GetTypeInfoCount,
+ 2,
+ uintptr(unsafe.Pointer(disp)),
+ uintptr(unsafe.Pointer(&c)),
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func getTypeInfo(disp *IDispatch) (tinfo *ITypeInfo, err error) {
+ hr, _, _ := syscall.Syscall(
+ disp.VTable().GetTypeInfo,
+ 3,
+ uintptr(unsafe.Pointer(disp)),
+ uintptr(GetUserDefaultLCID()),
+ uintptr(unsafe.Pointer(&tinfo)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) {
+ var dispparams DISPPARAMS
+
+ if dispatch&DISPATCH_PROPERTYPUT != 0 {
+ dispnames := [1]int32{DISPID_PROPERTYPUT}
+ dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0]))
+ dispparams.cNamedArgs = 1
+ }
+ var vargs []VARIANT
+ if len(params) > 0 {
+ vargs = make([]VARIANT, len(params))
+ for i, v := range params {
+ //n := len(params)-i-1
+ n := len(params) - i - 1
+ VariantInit(&vargs[n])
+ switch vv := v.(type) {
+ case bool:
+ if vv {
+ vargs[n] = NewVariant(VT_BOOL, 0xffff)
+ } else {
+ vargs[n] = NewVariant(VT_BOOL, 0)
+ }
+ case *bool:
+ vargs[n] = NewVariant(VT_BOOL|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*bool)))))
+ case uint8:
+ vargs[n] = NewVariant(VT_I1, int64(v.(uint8)))
+ case *uint8:
+ vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8)))))
+ case int8:
+ vargs[n] = NewVariant(VT_I1, int64(v.(int8)))
+ case *int8:
+ vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8)))))
+ case int16:
+ vargs[n] = NewVariant(VT_I2, int64(v.(int16)))
+ case *int16:
+ vargs[n] = NewVariant(VT_I2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int16)))))
+ case uint16:
+ vargs[n] = NewVariant(VT_UI2, int64(v.(uint16)))
+ case *uint16:
+ vargs[n] = NewVariant(VT_UI2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint16)))))
+ case int32:
+ vargs[n] = NewVariant(VT_I4, int64(v.(int32)))
+ case *int32:
+ vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int32)))))
+ case uint32:
+ vargs[n] = NewVariant(VT_UI4, int64(v.(uint32)))
+ case *uint32:
+ vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint32)))))
+ case int64:
+ vargs[n] = NewVariant(VT_I8, int64(v.(int64)))
+ case *int64:
+ vargs[n] = NewVariant(VT_I8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int64)))))
+ case uint64:
+ vargs[n] = NewVariant(VT_UI8, int64(uintptr(v.(uint64))))
+ case *uint64:
+ vargs[n] = NewVariant(VT_UI8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint64)))))
+ case int:
+ vargs[n] = NewVariant(VT_I4, int64(v.(int)))
+ case *int:
+ vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int)))))
+ case uint:
+ vargs[n] = NewVariant(VT_UI4, int64(v.(uint)))
+ case *uint:
+ vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint)))))
+ case float32:
+ vargs[n] = NewVariant(VT_R4, *(*int64)(unsafe.Pointer(&vv)))
+ case *float32:
+ vargs[n] = NewVariant(VT_R4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float32)))))
+ case float64:
+ vargs[n] = NewVariant(VT_R8, *(*int64)(unsafe.Pointer(&vv)))
+ case *float64:
+ vargs[n] = NewVariant(VT_R8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float64)))))
+ case string:
+ vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(v.(string))))))
+ case *string:
+ vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*string)))))
+ case time.Time:
+ s := vv.Format("2006-01-02 15:04:05")
+ vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(s)))))
+ case *time.Time:
+ s := vv.Format("2006-01-02 15:04:05")
+ vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(&s))))
+ case *IDispatch:
+ vargs[n] = NewVariant(VT_DISPATCH, int64(uintptr(unsafe.Pointer(v.(*IDispatch)))))
+ case **IDispatch:
+ vargs[n] = NewVariant(VT_DISPATCH|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(**IDispatch)))))
+ case nil:
+ vargs[n] = NewVariant(VT_NULL, 0)
+ case *VARIANT:
+ vargs[n] = NewVariant(VT_VARIANT|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*VARIANT)))))
+ case []byte:
+ safeByteArray := safeArrayFromByteSlice(v.([]byte))
+ vargs[n] = NewVariant(VT_ARRAY|VT_UI1, int64(uintptr(unsafe.Pointer(safeByteArray))))
+ defer VariantClear(&vargs[n])
+ case []string:
+ safeByteArray := safeArrayFromStringSlice(v.([]string))
+ vargs[n] = NewVariant(VT_ARRAY|VT_BSTR, int64(uintptr(unsafe.Pointer(safeByteArray))))
+ defer VariantClear(&vargs[n])
+ default:
+ panic("unknown type")
+ }
+ }
+ dispparams.rgvarg = uintptr(unsafe.Pointer(&vargs[0]))
+ dispparams.cArgs = uint32(len(params))
+ }
+
+ result = new(VARIANT)
+ var excepInfo EXCEPINFO
+ VariantInit(result)
+ hr, _, _ := syscall.Syscall9(
+ disp.VTable().Invoke,
+ 9,
+ uintptr(unsafe.Pointer(disp)),
+ uintptr(dispid),
+ uintptr(unsafe.Pointer(IID_NULL)),
+ uintptr(GetUserDefaultLCID()),
+ uintptr(dispatch),
+ uintptr(unsafe.Pointer(&dispparams)),
+ uintptr(unsafe.Pointer(result)),
+ uintptr(unsafe.Pointer(&excepInfo)),
+ 0)
+ if hr != 0 {
+ err = NewErrorWithSubError(hr, BstrToString(excepInfo.bstrDescription), excepInfo)
+ }
+ for _, varg := range vargs {
+ if varg.VT == VT_BSTR && varg.Val != 0 {
+ SysFreeString(((*int16)(unsafe.Pointer(uintptr(varg.Val)))))
+ }
+ /*
+ if varg.VT == (VT_BSTR|VT_BYREF) && varg.Val != 0 {
+ *(params[n].(*string)) = LpOleStrToString((*uint16)(unsafe.Pointer(uintptr(varg.Val))))
+ println(*(params[n].(*string)))
+ fmt.Fprintln(os.Stderr, *(params[n].(*string)))
+ }
+ */
+ }
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant.go b/vendor/github.com/go-ole/go-ole/ienumvariant.go
new file mode 100644
index 000000000..243389754
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/ienumvariant.go
@@ -0,0 +1,19 @@
+package ole
+
+import "unsafe"
+
+type IEnumVARIANT struct {
+ IUnknown
+}
+
+type IEnumVARIANTVtbl struct {
+ IUnknownVtbl
+ Next uintptr
+ Skip uintptr
+ Reset uintptr
+ Clone uintptr
+}
+
+func (v *IEnumVARIANT) VTable() *IEnumVARIANTVtbl {
+ return (*IEnumVARIANTVtbl)(unsafe.Pointer(v.RawVTable))
+}
diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_func.go b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go
new file mode 100644
index 000000000..c14848199
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package ole
+
+func (enum *IEnumVARIANT) Clone() (*IEnumVARIANT, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+func (enum *IEnumVARIANT) Reset() error {
+ return NewError(E_NOTIMPL)
+}
+
+func (enum *IEnumVARIANT) Skip(celt uint) error {
+ return NewError(E_NOTIMPL)
+}
+
+func (enum *IEnumVARIANT) Next(celt uint) (VARIANT, uint, error) {
+ return NewVariant(VT_NULL, int64(0)), 0, NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go
new file mode 100644
index 000000000..4781f3b8b
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go
@@ -0,0 +1,63 @@
+// +build windows
+
+package ole
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func (enum *IEnumVARIANT) Clone() (cloned *IEnumVARIANT, err error) {
+ hr, _, _ := syscall.Syscall(
+ enum.VTable().Clone,
+ 2,
+ uintptr(unsafe.Pointer(enum)),
+ uintptr(unsafe.Pointer(&cloned)),
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func (enum *IEnumVARIANT) Reset() (err error) {
+ hr, _, _ := syscall.Syscall(
+ enum.VTable().Reset,
+ 1,
+ uintptr(unsafe.Pointer(enum)),
+ 0,
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func (enum *IEnumVARIANT) Skip(celt uint) (err error) {
+ hr, _, _ := syscall.Syscall(
+ enum.VTable().Skip,
+ 2,
+ uintptr(unsafe.Pointer(enum)),
+ uintptr(celt),
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func (enum *IEnumVARIANT) Next(celt uint) (array VARIANT, length uint, err error) {
+ hr, _, _ := syscall.Syscall6(
+ enum.VTable().Next,
+ 4,
+ uintptr(unsafe.Pointer(enum)),
+ uintptr(celt),
+ uintptr(unsafe.Pointer(&array)),
+ uintptr(unsafe.Pointer(&length)),
+ 0,
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
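+
+// Illustrative sketch of draining an enumerator one element at a time; the
+// loop stops when Next reports an error (including S_FALSE at the end) or
+// returns no items:
+//
+//    for {
+//        item, fetched, err := enum.Next(1)
+//        if err != nil || fetched == 0 {
+//            break
+//        }
+//        _ = item
+//    }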
diff --git a/vendor/github.com/go-ole/go-ole/iinspectable.go b/vendor/github.com/go-ole/go-ole/iinspectable.go
new file mode 100644
index 000000000..f4a19e253
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iinspectable.go
@@ -0,0 +1,18 @@
+package ole
+
+import "unsafe"
+
+type IInspectable struct {
+ IUnknown
+}
+
+type IInspectableVtbl struct {
+ IUnknownVtbl
+ GetIIds uintptr
+ GetRuntimeClassName uintptr
+ GetTrustLevel uintptr
+}
+
+func (v *IInspectable) VTable() *IInspectableVtbl {
+ return (*IInspectableVtbl)(unsafe.Pointer(v.RawVTable))
+}
diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_func.go b/vendor/github.com/go-ole/go-ole/iinspectable_func.go
new file mode 100644
index 000000000..348829bf0
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iinspectable_func.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package ole
+
+func (v *IInspectable) GetIids() ([]*GUID, error) {
+ return []*GUID{}, NewError(E_NOTIMPL)
+}
+
+func (v *IInspectable) GetRuntimeClassName() (string, error) {
+ return "", NewError(E_NOTIMPL)
+}
+
+func (v *IInspectable) GetTrustLevel() (uint32, error) {
+ return uint32(0), NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_windows.go b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go
new file mode 100644
index 000000000..b19dde5b5
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go
@@ -0,0 +1,72 @@
+// +build windows
+
+package ole
+
+import (
+ "bytes"
+ "encoding/binary"
+ "reflect"
+ "syscall"
+ "unsafe"
+)
+
+func (v *IInspectable) GetIids() (iids []*GUID, err error) {
+ var count uint32
+ var array uintptr
+ hr, _, _ := syscall.Syscall(
+ v.VTable().GetIIds,
+ 3,
+ uintptr(unsafe.Pointer(v)),
+ uintptr(unsafe.Pointer(&count)),
+ uintptr(unsafe.Pointer(&array)))
+ if hr != 0 {
+ err = NewError(hr)
+ return
+ }
+ defer CoTaskMemFree(array)
+
+ iids = make([]*GUID, count)
+ byteCount := count * uint32(unsafe.Sizeof(GUID{}))
+ slicehdr := reflect.SliceHeader{Data: array, Len: int(byteCount), Cap: int(byteCount)}
+ byteSlice := *(*[]byte)(unsafe.Pointer(&slicehdr))
+ reader := bytes.NewReader(byteSlice)
+ for i := range iids {
+ guid := GUID{}
+ err = binary.Read(reader, binary.LittleEndian, &guid)
+ if err != nil {
+ return
+ }
+ iids[i] = &guid
+ }
+ return
+}
+
+func (v *IInspectable) GetRuntimeClassName() (s string, err error) {
+ var hstring HString
+ hr, _, _ := syscall.Syscall(
+ v.VTable().GetRuntimeClassName,
+ 2,
+ uintptr(unsafe.Pointer(v)),
+ uintptr(unsafe.Pointer(&hstring)),
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ return
+ }
+ s = hstring.String()
+ DeleteHString(hstring)
+ return
+}
+
+func (v *IInspectable) GetTrustLevel() (level uint32, err error) {
+ hr, _, _ := syscall.Syscall(
+ v.VTable().GetTrustLevel,
+ 2,
+ uintptr(unsafe.Pointer(v)),
+ uintptr(unsafe.Pointer(&level)),
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go
new file mode 100644
index 000000000..25f3a6f24
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go
@@ -0,0 +1,21 @@
+package ole
+
+import "unsafe"
+
+type IProvideClassInfo struct {
+ IUnknown
+}
+
+type IProvideClassInfoVtbl struct {
+ IUnknownVtbl
+ GetClassInfo uintptr
+}
+
+func (v *IProvideClassInfo) VTable() *IProvideClassInfoVtbl {
+ return (*IProvideClassInfoVtbl)(unsafe.Pointer(v.RawVTable))
+}
+
+func (v *IProvideClassInfo) GetClassInfo() (cinfo *ITypeInfo, err error) {
+ cinfo, err = getClassInfo(v)
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go
new file mode 100644
index 000000000..7e3cb63ea
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package ole
+
+func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) {
+ return nil, NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go
new file mode 100644
index 000000000..2ad016394
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go
@@ -0,0 +1,21 @@
+// +build windows
+
+package ole
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) {
+ hr, _, _ := syscall.Syscall(
+ disp.VTable().GetClassInfo,
+ 2,
+ uintptr(unsafe.Pointer(disp)),
+ uintptr(unsafe.Pointer(&tinfo)),
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo.go b/vendor/github.com/go-ole/go-ole/itypeinfo.go
new file mode 100644
index 000000000..dd3c5e21b
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/itypeinfo.go
@@ -0,0 +1,34 @@
+package ole
+
+import "unsafe"
+
+type ITypeInfo struct {
+ IUnknown
+}
+
+type ITypeInfoVtbl struct {
+ IUnknownVtbl
+ GetTypeAttr uintptr
+ GetTypeComp uintptr
+ GetFuncDesc uintptr
+ GetVarDesc uintptr
+ GetNames uintptr
+ GetRefTypeOfImplType uintptr
+ GetImplTypeFlags uintptr
+ GetIDsOfNames uintptr
+ Invoke uintptr
+ GetDocumentation uintptr
+ GetDllEntry uintptr
+ GetRefTypeInfo uintptr
+ AddressOfMember uintptr
+ CreateInstance uintptr
+ GetMops uintptr
+ GetContainingTypeLib uintptr
+ ReleaseTypeAttr uintptr
+ ReleaseFuncDesc uintptr
+ ReleaseVarDesc uintptr
+}
+
+func (v *ITypeInfo) VTable() *ITypeInfoVtbl {
+ return (*ITypeInfoVtbl)(unsafe.Pointer(v.RawVTable))
+}
diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_func.go b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go
new file mode 100644
index 000000000..8364a659b
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package ole
+
+func (v *ITypeInfo) GetTypeAttr() (*TYPEATTR, error) {
+ return nil, NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go
new file mode 100644
index 000000000..54782b3da
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go
@@ -0,0 +1,21 @@
+// +build windows
+
+package ole
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func (v *ITypeInfo) GetTypeAttr() (tattr *TYPEATTR, err error) {
+ hr, _, _ := syscall.Syscall(
+ uintptr(v.VTable().GetTypeAttr),
+ 2,
+ uintptr(unsafe.Pointer(v)),
+ uintptr(unsafe.Pointer(&tattr)),
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/iunknown.go b/vendor/github.com/go-ole/go-ole/iunknown.go
new file mode 100644
index 000000000..108f28ea6
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iunknown.go
@@ -0,0 +1,57 @@
+package ole
+
+import "unsafe"
+
+type IUnknown struct {
+ RawVTable *interface{}
+}
+
+type IUnknownVtbl struct {
+ QueryInterface uintptr
+ AddRef uintptr
+ Release uintptr
+}
+
+type UnknownLike interface {
+ QueryInterface(iid *GUID) (disp *IDispatch, err error)
+ AddRef() int32
+ Release() int32
+}
+
+func (v *IUnknown) VTable() *IUnknownVtbl {
+ return (*IUnknownVtbl)(unsafe.Pointer(v.RawVTable))
+}
+
+func (v *IUnknown) PutQueryInterface(interfaceID *GUID, obj interface{}) error {
+ return reflectQueryInterface(v, v.VTable().QueryInterface, interfaceID, obj)
+}
+
+func (v *IUnknown) IDispatch(interfaceID *GUID) (dispatch *IDispatch, err error) {
+ err = v.PutQueryInterface(interfaceID, &dispatch)
+ return
+}
+
+func (v *IUnknown) IEnumVARIANT(interfaceID *GUID) (enum *IEnumVARIANT, err error) {
+ err = v.PutQueryInterface(interfaceID, &enum)
+ return
+}
+
+func (v *IUnknown) QueryInterface(iid *GUID) (*IDispatch, error) {
+ return queryInterface(v, iid)
+}
+
+func (v *IUnknown) MustQueryInterface(iid *GUID) (disp *IDispatch) {
+ unk, err := queryInterface(v, iid)
+ if err != nil {
+ panic(err)
+ }
+ return unk
+}
+
+func (v *IUnknown) AddRef() int32 {
+ return addRef(v)
+}
+
+func (v *IUnknown) Release() int32 {
+ return release(v)
+}
diff --git a/vendor/github.com/go-ole/go-ole/iunknown_func.go b/vendor/github.com/go-ole/go-ole/iunknown_func.go
new file mode 100644
index 000000000..d0a62cfd7
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iunknown_func.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package ole
+
+func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) {
+ return NewError(E_NOTIMPL)
+}
+
+func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+func addRef(unk *IUnknown) int32 {
+ return 0
+}
+
+func release(unk *IUnknown) int32 {
+ return 0
+}
diff --git a/vendor/github.com/go-ole/go-ole/iunknown_windows.go b/vendor/github.com/go-ole/go-ole/iunknown_windows.go
new file mode 100644
index 000000000..ede5bb8c1
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iunknown_windows.go
@@ -0,0 +1,58 @@
+// +build windows
+
+package ole
+
+import (
+ "reflect"
+ "syscall"
+ "unsafe"
+)
+
+func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) {
+ selfValue := reflect.ValueOf(self).Elem()
+ objValue := reflect.ValueOf(obj).Elem()
+
+ hr, _, _ := syscall.Syscall(
+ method,
+ 3,
+ selfValue.UnsafeAddr(),
+ uintptr(unsafe.Pointer(interfaceID)),
+ objValue.Addr().Pointer())
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) {
+ hr, _, _ := syscall.Syscall(
+ unk.VTable().QueryInterface,
+ 3,
+ uintptr(unsafe.Pointer(unk)),
+ uintptr(unsafe.Pointer(iid)),
+ uintptr(unsafe.Pointer(&disp)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func addRef(unk *IUnknown) int32 {
+ ret, _, _ := syscall.Syscall(
+ unk.VTable().AddRef,
+ 1,
+ uintptr(unsafe.Pointer(unk)),
+ 0,
+ 0)
+ return int32(ret)
+}
+
+func release(unk *IUnknown) int32 {
+ ret, _, _ := syscall.Syscall(
+ unk.VTable().Release,
+ 1,
+ uintptr(unsafe.Pointer(unk)),
+ 0,
+ 0)
+ return int32(ret)
+}
diff --git a/vendor/github.com/go-ole/go-ole/ole.go b/vendor/github.com/go-ole/go-ole/ole.go
new file mode 100644
index 000000000..b92b4ea18
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/ole.go
@@ -0,0 +1,147 @@
+package ole
+
+import (
+ "fmt"
+ "strings"
+)
+
+// DISPPARAMS are the arguments that are passed to methods or properties.
+type DISPPARAMS struct {
+ rgvarg uintptr
+ rgdispidNamedArgs uintptr
+ cArgs uint32
+ cNamedArgs uint32
+}
+
+// EXCEPINFO defines exception info.
+type EXCEPINFO struct {
+ wCode uint16
+ wReserved uint16
+ bstrSource *uint16
+ bstrDescription *uint16
+ bstrHelpFile *uint16
+ dwHelpContext uint32
+ pvReserved uintptr
+ pfnDeferredFillIn uintptr
+ scode uint32
+}
+
+// String converts EXCEPINFO to string.
+func (e EXCEPINFO) String() string {
+ var src, desc, hlp string
+ if e.bstrSource == nil {
+ src = ""
+ } else {
+ src = BstrToString(e.bstrSource)
+ }
+
+ if e.bstrDescription == nil {
+ desc = ""
+ } else {
+ desc = BstrToString(e.bstrDescription)
+ }
+
+ if e.bstrHelpFile == nil {
+ hlp = ""
+ } else {
+ hlp = BstrToString(e.bstrHelpFile)
+ }
+
+ return fmt.Sprintf(
+ "wCode: %#x, bstrSource: %v, bstrDescription: %v, bstrHelpFile: %v, dwHelpContext: %#x, scode: %#x",
+ e.wCode, src, desc, hlp, e.dwHelpContext, e.scode,
+ )
+}
+
+// Error implements error interface and returns error string.
+func (e EXCEPINFO) Error() string {
+ if e.bstrDescription != nil {
+ return strings.TrimSpace(BstrToString(e.bstrDescription))
+ }
+
+ src := "Unknown"
+ if e.bstrSource != nil {
+ src = BstrToString(e.bstrSource)
+ }
+
+ code := e.scode
+ if e.wCode != 0 {
+ code = uint32(e.wCode)
+ }
+
+ return fmt.Sprintf("%v: %#x", src, code)
+}
+
+// PARAMDATA defines parameter data type.
+type PARAMDATA struct {
+ Name *int16
+ Vt uint16
+}
+
+// METHODDATA defines method info.
+type METHODDATA struct {
+ Name *uint16
+ Data *PARAMDATA
+ Dispid int32
+ Meth uint32
+ CC int32
+ CArgs uint32
+ Flags uint16
+ VtReturn uint32
+}
+
+// INTERFACEDATA defines interface info.
+type INTERFACEDATA struct {
+ MethodData *METHODDATA
+ CMembers uint32
+}
+
+// Point is 2D vector type.
+type Point struct {
+ X int32
+ Y int32
+}
+
+// Msg is a message retrieved from a window or thread message queue.
+type Msg struct {
+ Hwnd uint32
+ Message uint32
+ Wparam int32
+ Lparam int32
+ Time uint32
+ Pt Point
+}
+
+// TYPEDESC defines data type.
+type TYPEDESC struct {
+ Hreftype uint32
+ VT uint16
+}
+
+// IDLDESC defines IDL info.
+type IDLDESC struct {
+ DwReserved uint32
+ WIDLFlags uint16
+}
+
+// TYPEATTR defines type info.
+type TYPEATTR struct {
+ Guid GUID
+ Lcid uint32
+ dwReserved uint32
+ MemidConstructor int32
+ MemidDestructor int32
+ LpstrSchema *uint16
+ CbSizeInstance uint32
+ Typekind int32
+ CFuncs uint16
+ CVars uint16
+ CImplTypes uint16
+ CbSizeVft uint16
+ CbAlignment uint16
+ WTypeFlags uint16
+ WMajorVerNum uint16
+ WMinorVerNum uint16
+ TdescAlias TYPEDESC
+ IdldescType IDLDESC
+}
diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection.go b/vendor/github.com/go-ole/go-ole/oleutil/connection.go
new file mode 100644
index 000000000..60df73cda
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/oleutil/connection.go
@@ -0,0 +1,100 @@
+// +build windows
+
+package oleutil
+
+import (
+ "reflect"
+ "unsafe"
+
+ ole "github.com/go-ole/go-ole"
+)
+
+type stdDispatch struct {
+ lpVtbl *stdDispatchVtbl
+ ref int32
+ iid *ole.GUID
+ iface interface{}
+ funcMap map[string]int32
+}
+
+type stdDispatchVtbl struct {
+ pQueryInterface uintptr
+ pAddRef uintptr
+ pRelease uintptr
+ pGetTypeInfoCount uintptr
+ pGetTypeInfo uintptr
+ pGetIDsOfNames uintptr
+ pInvoke uintptr
+}
+
+func dispQueryInterface(this *ole.IUnknown, iid *ole.GUID, punk **ole.IUnknown) uint32 {
+ pthis := (*stdDispatch)(unsafe.Pointer(this))
+ *punk = nil
+ if ole.IsEqualGUID(iid, ole.IID_IUnknown) ||
+ ole.IsEqualGUID(iid, ole.IID_IDispatch) {
+ dispAddRef(this)
+ *punk = this
+ return ole.S_OK
+ }
+ if ole.IsEqualGUID(iid, pthis.iid) {
+ dispAddRef(this)
+ *punk = this
+ return ole.S_OK
+ }
+ return ole.E_NOINTERFACE
+}
+
+func dispAddRef(this *ole.IUnknown) int32 {
+ pthis := (*stdDispatch)(unsafe.Pointer(this))
+ pthis.ref++
+ return pthis.ref
+}
+
+func dispRelease(this *ole.IUnknown) int32 {
+ pthis := (*stdDispatch)(unsafe.Pointer(this))
+ pthis.ref--
+ return pthis.ref
+}
+
+func dispGetIDsOfNames(this *ole.IUnknown, iid *ole.GUID, wnames []*uint16, namelen int, lcid int, pdisp []int32) uintptr {
+ pthis := (*stdDispatch)(unsafe.Pointer(this))
+ names := make([]string, len(wnames))
+ for i := 0; i < len(names); i++ {
+ names[i] = ole.LpOleStrToString(wnames[i])
+ }
+ for n := 0; n < namelen; n++ {
+ if id, ok := pthis.funcMap[names[n]]; ok {
+ pdisp[n] = id
+ }
+ }
+ return ole.S_OK
+}
+
+func dispGetTypeInfoCount(pcount *int) uintptr {
+ if pcount != nil {
+ *pcount = 0
+ }
+ return ole.S_OK
+}
+
+func dispGetTypeInfo(ptypeif *uintptr) uintptr {
+ return ole.E_NOTIMPL
+}
+
+func dispInvoke(this *ole.IDispatch, dispid int32, riid *ole.GUID, lcid int, flags int16, dispparams *ole.DISPPARAMS, result *ole.VARIANT, pexcepinfo *ole.EXCEPINFO, nerr *uint) uintptr {
+ pthis := (*stdDispatch)(unsafe.Pointer(this))
+ found := ""
+ for name, id := range pthis.funcMap {
+ if id == dispid {
+ found = name
+ }
+ }
+ if found != "" {
+ rv := reflect.ValueOf(pthis.iface).Elem()
+ rm := rv.MethodByName(found)
+ rr := rm.Call([]reflect.Value{})
+ println(len(rr))
+ return ole.S_OK
+ }
+ return ole.E_NOTIMPL
+}
diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go
new file mode 100644
index 000000000..8818fb827
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package oleutil
+
+import ole "github.com/go-ole/go-ole"
+
+// ConnectObject creates a connection point between two services for communication.
+func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (uint32, error) {
+ return 0, ole.NewError(ole.E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go
new file mode 100644
index 000000000..6b5c05999
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go
@@ -0,0 +1,57 @@
+// +build windows
+
+package oleutil
+
+import (
+ "reflect"
+ "syscall"
+ "unsafe"
+
+ ole "github.com/go-ole/go-ole"
+)
+
+// ConnectObject creates a connection point between two services for communication.
+func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (cookie uint32, err error) {
+ unknown, err := disp.QueryInterface(ole.IID_IConnectionPointContainer)
+ if err != nil {
+ return
+ }
+
+ container := (*ole.IConnectionPointContainer)(unsafe.Pointer(unknown))
+ var point *ole.IConnectionPoint
+ err = container.FindConnectionPoint(iid, &point)
+ if err != nil {
+ return
+ }
+ if edisp, ok := idisp.(*ole.IUnknown); ok {
+ cookie, err = point.Advise(edisp)
+ container.Release()
+ if err != nil {
+ return
+ }
+ }
+ rv := reflect.ValueOf(disp).Elem()
+ if rv.Type().Kind() == reflect.Struct {
+ dest := &stdDispatch{}
+ dest.lpVtbl = &stdDispatchVtbl{}
+ dest.lpVtbl.pQueryInterface = syscall.NewCallback(dispQueryInterface)
+ dest.lpVtbl.pAddRef = syscall.NewCallback(dispAddRef)
+ dest.lpVtbl.pRelease = syscall.NewCallback(dispRelease)
+ dest.lpVtbl.pGetTypeInfoCount = syscall.NewCallback(dispGetTypeInfoCount)
+ dest.lpVtbl.pGetTypeInfo = syscall.NewCallback(dispGetTypeInfo)
+ dest.lpVtbl.pGetIDsOfNames = syscall.NewCallback(dispGetIDsOfNames)
+ dest.lpVtbl.pInvoke = syscall.NewCallback(dispInvoke)
+ dest.iface = disp
+ dest.iid = iid
+ cookie, err = point.Advise((*ole.IUnknown)(unsafe.Pointer(dest)))
+ container.Release()
+ if err != nil {
+ point.Release()
+ return
+ }
+ }
+
+ container.Release()
+
+ return 0, ole.NewError(ole.E_INVALIDARG)
+}
diff --git a/vendor/github.com/go-ole/go-ole/oleutil/go-get.go b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go
new file mode 100644
index 000000000..58347628f
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go
@@ -0,0 +1,6 @@
+// This file exists so that "go get" succeeds; without it the command fails with:
+// no buildable Go source files in ...
+//
+// +build !windows
+
+package oleutil
diff --git a/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go
new file mode 100644
index 000000000..55e072a63
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go
@@ -0,0 +1,89 @@
+package oleutil
+
+import ole "github.com/go-ole/go-ole"
+
+// ClassIDFrom retrieves the class ID for the given program ID or application string.
+func ClassIDFrom(programID string) (classID *ole.GUID, err error) {
+ return ole.ClassIDFrom(programID)
+}
+
+// CreateObject creates object from programID based on interface type.
+//
+// Only supports IUnknown.
+//
+// Program ID can be either program ID or application string.
+func CreateObject(programID string) (unknown *ole.IUnknown, err error) {
+ classID, err := ole.ClassIDFrom(programID)
+ if err != nil {
+ return
+ }
+
+ unknown, err = ole.CreateInstance(classID, ole.IID_IUnknown)
+ if err != nil {
+ return
+ }
+
+ return
+}
+
+// GetActiveObject retrieves active object for program ID and interface ID based
+// on interface type.
+//
+// Only supports IUnknown.
+//
+// Program ID can be either program ID or application string.
+func GetActiveObject(programID string) (unknown *ole.IUnknown, err error) {
+ classID, err := ole.ClassIDFrom(programID)
+ if err != nil {
+ return
+ }
+
+ unknown, err = ole.GetActiveObject(classID, ole.IID_IUnknown)
+ if err != nil {
+ return
+ }
+
+ return
+}
+
+// CallMethod calls method on IDispatch with parameters.
+func CallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) {
+ return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_METHOD, params)
+}
+
+// MustCallMethod calls method on IDispatch with parameters or panics.
+func MustCallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) {
+ r, err := CallMethod(disp, name, params...)
+ if err != nil {
+ panic(err.Error())
+ }
+ return r
+}
+
+// GetProperty retrieves property from IDispatch.
+func GetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) {
+ return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYGET, params)
+}
+
+// MustGetProperty retrieves property from IDispatch or panics.
+func MustGetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) {
+ r, err := GetProperty(disp, name, params...)
+ if err != nil {
+ panic(err.Error())
+ }
+ return r
+}
+
+// PutProperty mutates property.
+func PutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) {
+ return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUT, params)
+}
+
+// MustPutProperty mutates property or panics.
+func MustPutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) {
+ r, err := PutProperty(disp, name, params...)
+ if err != nil {
+ panic(err.Error())
+ }
+ return r
+}
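
Usage note: the oleutil helpers above are the usual entry point into go-ole. A minimal, Windows-only sketch follows; the ProgID "Scripting.FileSystemObject" and the "FolderExists" call are assumptions for illustration, not something this change relies on.

    package main

    import (
        "fmt"

        ole "github.com/go-ole/go-ole"
        "github.com/go-ole/go-ole/oleutil"
    )

    func main() {
        // Initialize COM on this thread; pairs with CoUninitialize below.
        if err := ole.CoInitialize(0); err != nil {
            panic(err)
        }
        defer ole.CoUninitialize()

        // CreateObject resolves the ProgID to a class ID and instantiates it.
        unknown, err := oleutil.CreateObject("Scripting.FileSystemObject")
        if err != nil {
            panic(err)
        }
        defer unknown.Release()

        // Ask for IDispatch so methods can be invoked by name.
        fso, err := unknown.QueryInterface(ole.IID_IDispatch)
        if err != nil {
            panic(err)
        }
        defer fso.Release()

        // CallMethod wraps InvokeWithOptionalArgs with DISPATCH_METHOD.
        result, err := oleutil.CallMethod(fso, "FolderExists", `C:\`)
        if err != nil {
            panic(err)
        }
        fmt.Println("FolderExists:", result.Value())
    }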
diff --git a/vendor/github.com/go-ole/go-ole/safearray.go b/vendor/github.com/go-ole/go-ole/safearray.go
new file mode 100644
index 000000000..a5201b56c
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/safearray.go
@@ -0,0 +1,27 @@
+// This file retrieves and processes SafeArray data returned from COM.
+
+package ole
+
+// SafeArrayBound defines the SafeArray boundaries.
+type SafeArrayBound struct {
+ Elements uint32
+ LowerBound int32
+}
+
+// SafeArray is how COM handles arrays.
+type SafeArray struct {
+ Dimensions uint16
+ FeaturesFlag uint16
+ ElementsSize uint32
+ LocksAmount uint32
+ Data uint32
+ Bounds [16]byte
+}
+
+// SAFEARRAY is obsolete, exists for backwards compatibility.
+// Use SafeArray
+type SAFEARRAY SafeArray
+
+// SAFEARRAYBOUND is obsolete, exists for backwards compatibility.
+// Use SafeArrayBound
+type SAFEARRAYBOUND SafeArrayBound
diff --git a/vendor/github.com/go-ole/go-ole/safearray_func.go b/vendor/github.com/go-ole/go-ole/safearray_func.go
new file mode 100644
index 000000000..c261a0078
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/safearray_func.go
@@ -0,0 +1,207 @@
+// +build !windows
+
+package ole
+
+// safeArrayAccessData returns raw array pointer.
+//
+// AKA: SafeArrayAccessData in Windows API.
+func safeArrayAccessData(safearray *SafeArray) (uintptr, error) {
+ return uintptr(0), NewError(E_NOTIMPL)
+}
+
+// safeArrayUnaccessData releases raw array.
+//
+// AKA: SafeArrayUnaccessData in Windows API.
+func safeArrayUnaccessData(safearray *SafeArray) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayAllocData allocates SafeArray.
+//
+// AKA: SafeArrayAllocData in Windows API.
+func safeArrayAllocData(safearray *SafeArray) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayAllocDescriptor allocates SafeArray.
+//
+// AKA: SafeArrayAllocDescriptor in Windows API.
+func safeArrayAllocDescriptor(dimensions uint32) (*SafeArray, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArrayAllocDescriptorEx allocates SafeArray.
+//
+// AKA: SafeArrayAllocDescriptorEx in Windows API.
+func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (*SafeArray, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArrayCopy returns copy of SafeArray.
+//
+// AKA: SafeArrayCopy in Windows API.
+func safeArrayCopy(original *SafeArray) (*SafeArray, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArrayCopyData duplicates SafeArray into another SafeArray object.
+//
+// AKA: SafeArrayCopyData in Windows API.
+func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayCreate creates SafeArray.
+//
+// AKA: SafeArrayCreate in Windows API.
+func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (*SafeArray, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArrayCreateEx creates SafeArray.
+//
+// AKA: SafeArrayCreateEx in Windows API.
+func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (*SafeArray, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArrayCreateVector creates SafeArray.
+//
+// AKA: SafeArrayCreateVector in Windows API.
+func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (*SafeArray, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArrayCreateVectorEx creates SafeArray.
+//
+// AKA: SafeArrayCreateVectorEx in Windows API.
+func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (*SafeArray, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArrayDestroy destroys SafeArray object.
+//
+// AKA: SafeArrayDestroy in Windows API.
+func safeArrayDestroy(safearray *SafeArray) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayDestroyData destroys SafeArray object.
+//
+// AKA: SafeArrayDestroyData in Windows API.
+func safeArrayDestroyData(safearray *SafeArray) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayDestroyDescriptor destroys SafeArray object.
+//
+// AKA: SafeArrayDestroyDescriptor in Windows API.
+func safeArrayDestroyDescriptor(safearray *SafeArray) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayGetDim returns the number of dimensions in the SafeArray.
+//
+// SafeArrays may have multiple dimensions, meaning they can be
+// multidimensional arrays.
+//
+// AKA: SafeArrayGetDim in Windows API.
+func safeArrayGetDim(safearray *SafeArray) (*uint32, error) {
+ u := uint32(0)
+ return &u, NewError(E_NOTIMPL)
+}
+
+// safeArrayGetElementSize is the element size in bytes.
+//
+// AKA: SafeArrayGetElemsize in Windows API.
+func safeArrayGetElementSize(safearray *SafeArray) (*uint32, error) {
+ u := uint32(0)
+ return &u, NewError(E_NOTIMPL)
+}
+
+// safeArrayGetElement retrieves element at given index.
+func safeArrayGetElement(safearray *SafeArray, index int64) (uintptr, error) {
+ return uintptr(0), NewError(E_NOTIMPL)
+}
+
+// safeArrayGetElementString retrieves the element at the given index and converts it to a string.
+func safeArrayGetElementString(safearray *SafeArray, index int64) (string, error) {
+ return "", NewError(E_NOTIMPL)
+}
+
+// safeArrayGetIID is the InterfaceID of the elements in the SafeArray.
+//
+// AKA: SafeArrayGetIID in Windows API.
+func safeArrayGetIID(safearray *SafeArray) (*GUID, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArrayGetLBound returns lower bounds of SafeArray.
+//
+// SafeArrays may have multiple dimensions, meaning they can be
+// multidimensional arrays.
+//
+// AKA: SafeArrayGetLBound in Windows API.
+func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (int64, error) {
+ return int64(0), NewError(E_NOTIMPL)
+}
+
+// safeArrayGetUBound returns upper bounds of SafeArray.
+//
+// SafeArrays may have multiple dimensions, meaning they can be
+// multidimensional arrays.
+//
+// AKA: SafeArrayGetUBound in Windows API.
+func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (int64, error) {
+ return int64(0), NewError(E_NOTIMPL)
+}
+
+// safeArrayGetVartype returns data type of SafeArray.
+//
+// AKA: SafeArrayGetVartype in Windows API.
+func safeArrayGetVartype(safearray *SafeArray) (uint16, error) {
+ return uint16(0), NewError(E_NOTIMPL)
+}
+
+// safeArrayLock locks the SafeArray so that its data can be safely read or
+// modified.
+//
+// This should be called so that nothing else reads or writes the SafeArray
+// while it is being edited.
+//
+// AKA: SafeArrayLock in Windows API.
+func safeArrayLock(safearray *SafeArray) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayUnlock unlocks SafeArray for reading.
+//
+// AKA: SafeArrayUnlock in Windows API.
+func safeArrayUnlock(safearray *SafeArray) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayPutElement stores the data element at the specified location in the
+// array.
+//
+// AKA: SafeArrayPutElement in Windows API.
+func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayGetRecordInfo accesses IRecordInfo info for custom types.
+//
+// AKA: SafeArrayGetRecordInfo in Windows API.
+//
+// XXX: Must implement IRecordInfo interface for this to return.
+func safeArrayGetRecordInfo(safearray *SafeArray) (interface{}, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArraySetRecordInfo mutates IRecordInfo info for custom types.
+//
+// AKA: SafeArraySetRecordInfo in Windows API.
+//
+// XXX: Must implement IRecordInfo interface for this to return.
+func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) error {
+ return NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/safearray_windows.go b/vendor/github.com/go-ole/go-ole/safearray_windows.go
new file mode 100644
index 000000000..b27936e24
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/safearray_windows.go
@@ -0,0 +1,337 @@
+// +build windows
+
+package ole
+
+import (
+ "unsafe"
+)
+
+var (
+ procSafeArrayAccessData, _ = modoleaut32.FindProc("SafeArrayAccessData")
+ procSafeArrayAllocData, _ = modoleaut32.FindProc("SafeArrayAllocData")
+ procSafeArrayAllocDescriptor, _ = modoleaut32.FindProc("SafeArrayAllocDescriptor")
+ procSafeArrayAllocDescriptorEx, _ = modoleaut32.FindProc("SafeArrayAllocDescriptorEx")
+ procSafeArrayCopy, _ = modoleaut32.FindProc("SafeArrayCopy")
+ procSafeArrayCopyData, _ = modoleaut32.FindProc("SafeArrayCopyData")
+ procSafeArrayCreate, _ = modoleaut32.FindProc("SafeArrayCreate")
+ procSafeArrayCreateEx, _ = modoleaut32.FindProc("SafeArrayCreateEx")
+ procSafeArrayCreateVector, _ = modoleaut32.FindProc("SafeArrayCreateVector")
+ procSafeArrayCreateVectorEx, _ = modoleaut32.FindProc("SafeArrayCreateVectorEx")
+ procSafeArrayDestroy, _ = modoleaut32.FindProc("SafeArrayDestroy")
+ procSafeArrayDestroyData, _ = modoleaut32.FindProc("SafeArrayDestroyData")
+ procSafeArrayDestroyDescriptor, _ = modoleaut32.FindProc("SafeArrayDestroyDescriptor")
+ procSafeArrayGetDim, _ = modoleaut32.FindProc("SafeArrayGetDim")
+ procSafeArrayGetElement, _ = modoleaut32.FindProc("SafeArrayGetElement")
+ procSafeArrayGetElemsize, _ = modoleaut32.FindProc("SafeArrayGetElemsize")
+ procSafeArrayGetIID, _ = modoleaut32.FindProc("SafeArrayGetIID")
+ procSafeArrayGetLBound, _ = modoleaut32.FindProc("SafeArrayGetLBound")
+ procSafeArrayGetUBound, _ = modoleaut32.FindProc("SafeArrayGetUBound")
+ procSafeArrayGetVartype, _ = modoleaut32.FindProc("SafeArrayGetVartype")
+ procSafeArrayLock, _ = modoleaut32.FindProc("SafeArrayLock")
+ procSafeArrayPtrOfIndex, _ = modoleaut32.FindProc("SafeArrayPtrOfIndex")
+ procSafeArrayUnaccessData, _ = modoleaut32.FindProc("SafeArrayUnaccessData")
+ procSafeArrayUnlock, _ = modoleaut32.FindProc("SafeArrayUnlock")
+ procSafeArrayPutElement, _ = modoleaut32.FindProc("SafeArrayPutElement")
+ //procSafeArrayRedim, _ = modoleaut32.FindProc("SafeArrayRedim") // TODO
+ //procSafeArraySetIID, _ = modoleaut32.FindProc("SafeArraySetIID") // TODO
+ procSafeArrayGetRecordInfo, _ = modoleaut32.FindProc("SafeArrayGetRecordInfo")
+ procSafeArraySetRecordInfo, _ = modoleaut32.FindProc("SafeArraySetRecordInfo")
+)
+
+// safeArrayAccessData returns raw array pointer.
+//
+// AKA: SafeArrayAccessData in Windows API.
+// Todo: Test
+func safeArrayAccessData(safearray *SafeArray) (element uintptr, err error) {
+ err = convertHresultToError(
+ procSafeArrayAccessData.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(unsafe.Pointer(&element))))
+ return
+}
+
+// safeArrayUnaccessData releases raw array.
+//
+// AKA: SafeArrayUnaccessData in Windows API.
+func safeArrayUnaccessData(safearray *SafeArray) (err error) {
+ err = convertHresultToError(procSafeArrayUnaccessData.Call(uintptr(unsafe.Pointer(safearray))))
+ return
+}
+
+// safeArrayAllocData allocates SafeArray.
+//
+// AKA: SafeArrayAllocData in Windows API.
+func safeArrayAllocData(safearray *SafeArray) (err error) {
+ err = convertHresultToError(procSafeArrayAllocData.Call(uintptr(unsafe.Pointer(safearray))))
+ return
+}
+
+// safeArrayAllocDescriptor allocates SafeArray.
+//
+// AKA: SafeArrayAllocDescriptor in Windows API.
+func safeArrayAllocDescriptor(dimensions uint32) (safearray *SafeArray, err error) {
+ err = convertHresultToError(
+ procSafeArrayAllocDescriptor.Call(uintptr(dimensions), uintptr(unsafe.Pointer(&safearray))))
+ return
+}
+
+// safeArrayAllocDescriptorEx allocates SafeArray.
+//
+// AKA: SafeArrayAllocDescriptorEx in Windows API.
+func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (safearray *SafeArray, err error) {
+ err = convertHresultToError(
+ procSafeArrayAllocDescriptorEx.Call(
+ uintptr(variantType),
+ uintptr(dimensions),
+ uintptr(unsafe.Pointer(&safearray))))
+ return
+}
+
+// safeArrayCopy returns copy of SafeArray.
+//
+// AKA: SafeArrayCopy in Windows API.
+func safeArrayCopy(original *SafeArray) (safearray *SafeArray, err error) {
+ err = convertHresultToError(
+ procSafeArrayCopy.Call(
+ uintptr(unsafe.Pointer(original)),
+ uintptr(unsafe.Pointer(&safearray))))
+ return
+}
+
+// safeArrayCopyData duplicates SafeArray into another SafeArray object.
+//
+// AKA: SafeArrayCopyData in Windows API.
+func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) (err error) {
+ err = convertHresultToError(
+ procSafeArrayCopyData.Call(
+ uintptr(unsafe.Pointer(original)),
+ uintptr(unsafe.Pointer(duplicate))))
+ return
+}
+
+// safeArrayCreate creates SafeArray.
+//
+// AKA: SafeArrayCreate in Windows API.
+func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (safearray *SafeArray, err error) {
+ sa, _, err := procSafeArrayCreate.Call(
+ uintptr(variantType),
+ uintptr(dimensions),
+ uintptr(unsafe.Pointer(bounds)))
+ safearray = (*SafeArray)(unsafe.Pointer(sa))
+ return
+}
+
+// safeArrayCreateEx creates SafeArray.
+//
+// AKA: SafeArrayCreateEx in Windows API.
+func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (safearray *SafeArray, err error) {
+ sa, _, err := procSafeArrayCreateEx.Call(
+ uintptr(variantType),
+ uintptr(dimensions),
+ uintptr(unsafe.Pointer(bounds)),
+ extra)
+ safearray = (*SafeArray)(unsafe.Pointer(sa))
+ return
+}
+
+// safeArrayCreateVector creates SafeArray.
+//
+// AKA: SafeArrayCreateVector in Windows API.
+func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (safearray *SafeArray, err error) {
+ sa, _, err := procSafeArrayCreateVector.Call(
+ uintptr(variantType),
+ uintptr(lowerBound),
+ uintptr(length))
+ safearray = (*SafeArray)(unsafe.Pointer(sa))
+ return
+}
+
+// safeArrayCreateVectorEx creates SafeArray.
+//
+// AKA: SafeArrayCreateVectorEx in Windows API.
+func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (safearray *SafeArray, err error) {
+ sa, _, err := procSafeArrayCreateVectorEx.Call(
+ uintptr(variantType),
+ uintptr(lowerBound),
+ uintptr(length),
+ extra)
+ safearray = (*SafeArray)(unsafe.Pointer(sa))
+ return
+}
+
+// safeArrayDestroy destroys SafeArray object.
+//
+// AKA: SafeArrayDestroy in Windows API.
+func safeArrayDestroy(safearray *SafeArray) (err error) {
+ err = convertHresultToError(procSafeArrayDestroy.Call(uintptr(unsafe.Pointer(safearray))))
+ return
+}
+
+// safeArrayDestroyData destroys SafeArray object.
+//
+// AKA: SafeArrayDestroyData in Windows API.
+func safeArrayDestroyData(safearray *SafeArray) (err error) {
+ err = convertHresultToError(procSafeArrayDestroyData.Call(uintptr(unsafe.Pointer(safearray))))
+ return
+}
+
+// safeArrayDestroyDescriptor destroys SafeArray object.
+//
+// AKA: SafeArrayDestroyDescriptor in Windows API.
+func safeArrayDestroyDescriptor(safearray *SafeArray) (err error) {
+ err = convertHresultToError(procSafeArrayDestroyDescriptor.Call(uintptr(unsafe.Pointer(safearray))))
+ return
+}
+
+// safeArrayGetDim returns the number of dimensions in the SafeArray.
+//
+// SafeArrays may have multiple dimensions, meaning they can be
+// multidimensional arrays.
+//
+// AKA: SafeArrayGetDim in Windows API.
+func safeArrayGetDim(safearray *SafeArray) (dimensions *uint32, err error) {
+ l, _, err := procSafeArrayGetDim.Call(uintptr(unsafe.Pointer(safearray)))
+ dimensions = (*uint32)(unsafe.Pointer(l))
+ return
+}
+
+// safeArrayGetElementSize is the element size in bytes.
+//
+// AKA: SafeArrayGetElemsize in Windows API.
+func safeArrayGetElementSize(safearray *SafeArray) (length *uint32, err error) {
+ l, _, err := procSafeArrayGetElemsize.Call(uintptr(unsafe.Pointer(safearray)))
+ length = (*uint32)(unsafe.Pointer(l))
+ return
+}
+
+// safeArrayGetElement retrieves element at given index.
+func safeArrayGetElement(safearray *SafeArray, index int64, pv unsafe.Pointer) error {
+ return convertHresultToError(
+ procSafeArrayGetElement.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(unsafe.Pointer(&index)),
+ uintptr(pv)))
+}
+
+// safeArrayGetElementString retrieves element at given index and converts to string.
+func safeArrayGetElementString(safearray *SafeArray, index int64) (str string, err error) {
+ var element *int16
+ err = convertHresultToError(
+ procSafeArrayGetElement.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(unsafe.Pointer(&index)),
+ uintptr(unsafe.Pointer(&element))))
+ str = BstrToString(*(**uint16)(unsafe.Pointer(&element)))
+ SysFreeString(element)
+ return
+}
+
+// safeArrayGetIID is the InterfaceID of the elements in the SafeArray.
+//
+// AKA: SafeArrayGetIID in Windows API.
+func safeArrayGetIID(safearray *SafeArray) (guid *GUID, err error) {
+ err = convertHresultToError(
+ procSafeArrayGetIID.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(unsafe.Pointer(&guid))))
+ return
+}
+
+// safeArrayGetLBound returns lower bounds of SafeArray.
+//
+// SafeArrays may have multiple dimensions, meaning they can be
+// multidimensional arrays.
+//
+// AKA: SafeArrayGetLBound in Windows API.
+func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (lowerBound int64, err error) {
+ err = convertHresultToError(
+ procSafeArrayGetLBound.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(dimension),
+ uintptr(unsafe.Pointer(&lowerBound))))
+ return
+}
+
+// safeArrayGetUBound returns upper bounds of SafeArray.
+//
+// SafeArrays may have multiple dimensions, meaning they can be
+// multidimensional arrays.
+//
+// AKA: SafeArrayGetUBound in Windows API.
+func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (upperBound int64, err error) {
+ err = convertHresultToError(
+ procSafeArrayGetUBound.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(dimension),
+ uintptr(unsafe.Pointer(&upperBound))))
+ return
+}
+
+// safeArrayGetVartype returns data type of SafeArray.
+//
+// AKA: SafeArrayGetVartype in Windows API.
+func safeArrayGetVartype(safearray *SafeArray) (varType uint16, err error) {
+ err = convertHresultToError(
+ procSafeArrayGetVartype.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(unsafe.Pointer(&varType))))
+ return
+}
+
+// safeArrayLock locks the SafeArray so that its data can be safely read or
+// modified.
+//
+// This should be called so that nothing else reads or writes the SafeArray
+// while it is being edited.
+//
+// AKA: SafeArrayLock in Windows API.
+func safeArrayLock(safearray *SafeArray) (err error) {
+ err = convertHresultToError(procSafeArrayLock.Call(uintptr(unsafe.Pointer(safearray))))
+ return
+}
+
+// safeArrayUnlock unlocks SafeArray for reading.
+//
+// AKA: SafeArrayUnlock in Windows API.
+func safeArrayUnlock(safearray *SafeArray) (err error) {
+ err = convertHresultToError(procSafeArrayUnlock.Call(uintptr(unsafe.Pointer(safearray))))
+ return
+}
+
+// safeArrayPutElement stores the data element at the specified location in the
+// array.
+//
+// AKA: SafeArrayPutElement in Windows API.
+func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) (err error) {
+ err = convertHresultToError(
+ procSafeArrayPutElement.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(unsafe.Pointer(&index)),
+ uintptr(unsafe.Pointer(element))))
+ return
+}
+
+// safeArrayGetRecordInfo accesses IRecordInfo info for custom types.
+//
+// AKA: SafeArrayGetRecordInfo in Windows API.
+//
+// XXX: Must implement IRecordInfo interface for this to return.
+func safeArrayGetRecordInfo(safearray *SafeArray) (recordInfo interface{}, err error) {
+ err = convertHresultToError(
+ procSafeArrayGetRecordInfo.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(unsafe.Pointer(&recordInfo))))
+ return
+}
+
+// safeArraySetRecordInfo mutates IRecordInfo info for custom types.
+//
+// AKA: SafeArraySetRecordInfo in Windows API.
+//
+// XXX: Must implement IRecordInfo interface for this to return.
+func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) (err error) {
+ err = convertHresultToError(
+ procSafeArraySetRecordInfo.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(unsafe.Pointer(&recordInfo))))
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/safearrayconversion.go b/vendor/github.com/go-ole/go-ole/safearrayconversion.go
new file mode 100644
index 000000000..ffeb2b97b
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/safearrayconversion.go
@@ -0,0 +1,140 @@
+// Helper for converting SafeArray to array of objects.
+
+package ole
+
+import (
+ "unsafe"
+)
+
+type SafeArrayConversion struct {
+ Array *SafeArray
+}
+
+func (sac *SafeArrayConversion) ToStringArray() (strings []string) {
+ totalElements, _ := sac.TotalElements(0)
+ strings = make([]string, totalElements)
+
+ for i := int64(0); i < totalElements; i++ {
+ strings[int32(i)], _ = safeArrayGetElementString(sac.Array, i)
+ }
+
+ return
+}
+
+func (sac *SafeArrayConversion) ToByteArray() (bytes []byte) {
+ totalElements, _ := sac.TotalElements(0)
+ bytes = make([]byte, totalElements)
+
+ for i := int64(0); i < totalElements; i++ {
+ safeArrayGetElement(sac.Array, i, unsafe.Pointer(&bytes[int32(i)]))
+ }
+
+ return
+}
+
+func (sac *SafeArrayConversion) ToValueArray() (values []interface{}) {
+ totalElements, _ := sac.TotalElements(0)
+ values = make([]interface{}, totalElements)
+ vt, _ := safeArrayGetVartype(sac.Array)
+
+ for i := 0; i < int(totalElements); i++ {
+ switch VT(vt) {
+ case VT_BOOL:
+ var v bool
+ safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v))
+ values[i] = v
+ case VT_I1:
+ var v int8
+ safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v))
+ values[i] = v
+ case VT_I2:
+ var v int16
+ safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v))
+ values[i] = v
+ case VT_I4:
+ var v int32
+ safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v))
+ values[i] = v
+ case VT_I8:
+ var v int64
+ safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v))
+ values[i] = v
+ case VT_UI1:
+ var v uint8
+ safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v))
+ values[i] = v
+ case VT_UI2:
+ var v uint16
+ safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v))
+ values[i] = v
+ case VT_UI4:
+ var v uint32
+ safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v))
+ values[i] = v
+ case VT_UI8:
+ var v uint64
+ safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v))
+ values[i] = v
+ case VT_R4:
+ var v float32
+ safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v))
+ values[i] = v
+ case VT_R8:
+ var v float64
+ safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v))
+ values[i] = v
+ case VT_BSTR:
+ var v string
+ safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v))
+ values[i] = v
+ case VT_VARIANT:
+ var v VARIANT
+ safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v))
+ values[i] = v.Value()
+ default:
+ // TODO
+ }
+ }
+
+ return
+}
+
+func (sac *SafeArrayConversion) GetType() (varType uint16, err error) {
+ return safeArrayGetVartype(sac.Array)
+}
+
+func (sac *SafeArrayConversion) GetDimensions() (dimensions *uint32, err error) {
+ return safeArrayGetDim(sac.Array)
+}
+
+func (sac *SafeArrayConversion) GetSize() (length *uint32, err error) {
+ return safeArrayGetElementSize(sac.Array)
+}
+
+func (sac *SafeArrayConversion) TotalElements(index uint32) (totalElements int64, err error) {
+ if index < 1 {
+ index = 1
+ }
+
+ // Get array bounds
+ var LowerBounds int64
+ var UpperBounds int64
+
+ LowerBounds, err = safeArrayGetLBound(sac.Array, index)
+ if err != nil {
+ return
+ }
+
+ UpperBounds, err = safeArrayGetUBound(sac.Array, index)
+ if err != nil {
+ return
+ }
+
+ totalElements = UpperBounds - LowerBounds + 1
+ return
+}
+
+// Release Safe Array memory
+func (sac *SafeArrayConversion) Release() {
+ safeArrayDestroy(sac.Array)
+}
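
The conversion helpers above are usually reached through VARIANT.ToArray (defined in variant.go below). A hedged sketch, assuming a hypothetical "Names" property that returns a SAFEARRAY of BSTR:

    package sample

    import (
        "fmt"

        ole "github.com/go-ole/go-ole"
        "github.com/go-ole/go-ole/oleutil"
    )

    // readNames assumes the "Names" property of disp returns an array of BSTR.
    func readNames(disp *ole.IDispatch) ([]string, error) {
        result, err := oleutil.GetProperty(disp, "Names")
        if err != nil {
            return nil, err
        }
        defer result.Clear()

        // ToArray returns nil unless the VARIANT actually carries an array.
        arr := result.ToArray()
        if arr == nil {
            return nil, fmt.Errorf("Names did not return a SAFEARRAY")
        }
        defer arr.Release() // destroys the underlying SAFEARRAY

        return arr.ToStringArray(), nil // copies each element into a Go string
    }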
diff --git a/vendor/github.com/go-ole/go-ole/safearrayslices.go b/vendor/github.com/go-ole/go-ole/safearrayslices.go
new file mode 100644
index 000000000..a9fa885f1
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/safearrayslices.go
@@ -0,0 +1,33 @@
+// +build windows
+
+package ole
+
+import (
+ "unsafe"
+)
+
+func safeArrayFromByteSlice(slice []byte) *SafeArray {
+ array, _ := safeArrayCreateVector(VT_UI1, 0, uint32(len(slice)))
+
+ if array == nil {
+ panic("Could not convert []byte to SAFEARRAY")
+ }
+
+ for i, v := range slice {
+ safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(&v)))
+ }
+ return array
+}
+
+func safeArrayFromStringSlice(slice []string) *SafeArray {
+ array, _ := safeArrayCreateVector(VT_BSTR, 0, uint32(len(slice)))
+
+ if array == nil {
+ panic("Could not convert []string to SAFEARRAY")
+ }
+ // SysAllocStringLen(s)
+ for i, v := range slice {
+ safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(SysAllocStringLen(v))))
+ }
+ return array
+}
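
These constructors are unexported, so the following is a package-internal, Windows-only sketch (illustrative only) of how a vector built here can be read back with the accessors from safearray_windows.go; it assumes the same imports as this file plus "unsafe".

    // roundTripBytes builds a VT_UI1 vector from src and copies it back out.
    func roundTripBytes(src []byte) ([]byte, error) {
        array := safeArrayFromByteSlice(src) // panics if the vector cannot be created
        defer safeArrayDestroy(array)

        out := make([]byte, len(src))
        for i := range src {
            // Copy the element at index i into out[i].
            if err := safeArrayGetElement(array, int64(i), unsafe.Pointer(&out[i])); err != nil {
                return nil, err
            }
        }
        return out, nil
    }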
diff --git a/vendor/github.com/go-ole/go-ole/utility.go b/vendor/github.com/go-ole/go-ole/utility.go
new file mode 100644
index 000000000..99ee82dc3
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/utility.go
@@ -0,0 +1,101 @@
+package ole
+
+import (
+ "unicode/utf16"
+ "unsafe"
+)
+
+// ClassIDFrom retrieves the class ID for the given program ID or application string.
+//
+// It checks both CLSIDFromProgID and CLSIDFromString. If you already know
+// which form you have, calling the specific function directly is faster, but
+// this helper tries both for you.
+func ClassIDFrom(programID string) (classID *GUID, err error) {
+ classID, err = CLSIDFromProgID(programID)
+ if err != nil {
+ classID, err = CLSIDFromString(programID)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// BytePtrToString converts byte pointer to a Go string.
+func BytePtrToString(p *byte) string {
+ a := (*[10000]uint8)(unsafe.Pointer(p))
+ i := 0
+ for a[i] != 0 {
+ i++
+ }
+ return string(a[:i])
+}
+
+// UTF16PtrToString is alias for LpOleStrToString.
+//
+// Kept for compatibility reasons.
+func UTF16PtrToString(p *uint16) string {
+ return LpOleStrToString(p)
+}
+
+// LpOleStrToString converts COM Unicode to Go string.
+func LpOleStrToString(p *uint16) string {
+ if p == nil {
+ return ""
+ }
+
+ length := lpOleStrLen(p)
+ a := make([]uint16, length)
+
+ ptr := unsafe.Pointer(p)
+
+ for i := 0; i < int(length); i++ {
+ a[i] = *(*uint16)(ptr)
+ ptr = unsafe.Pointer(uintptr(ptr) + 2)
+ }
+
+ return string(utf16.Decode(a))
+}
+
+// BstrToString converts COM binary string to Go string.
+func BstrToString(p *uint16) string {
+ if p == nil {
+ return ""
+ }
+ length := SysStringLen((*int16)(unsafe.Pointer(p)))
+ a := make([]uint16, length)
+
+ ptr := unsafe.Pointer(p)
+
+ for i := 0; i < int(length); i++ {
+ a[i] = *(*uint16)(ptr)
+ ptr = unsafe.Pointer(uintptr(ptr) + 2)
+ }
+ return string(utf16.Decode(a))
+}
+
+// lpOleStrLen returns the length of Unicode string.
+func lpOleStrLen(p *uint16) (length int64) {
+ if p == nil {
+ return 0
+ }
+
+ ptr := unsafe.Pointer(p)
+
+ for i := 0; ; i++ {
+ if 0 == *(*uint16)(ptr) {
+ length = int64(i)
+ break
+ }
+ ptr = unsafe.Pointer(uintptr(ptr) + 2)
+ }
+ return
+}
+
+// convertHresultToError converts a syscall HRESULT result into an error if the call was unsuccessful.
+func convertHresultToError(hr uintptr, r2 uintptr, ignore error) (err error) {
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
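
A small sketch of the two-step lookup ClassIDFrom performs, combined with ole.CreateInstance (the same call oleutil.CreateObject makes above); the ProgID mentioned in the comment is an assumption:

    package sample

    import ole "github.com/go-ole/go-ole"

    // instantiate accepts either a registered ProgID such as
    // "Scripting.FileSystemObject" or a literal GUID string.
    func instantiate(progOrGUID string) (*ole.IUnknown, error) {
        clsid, err := ole.ClassIDFrom(progOrGUID)
        if err != nil {
            return nil, err
        }
        // CreateInstance activates the COM class and returns its IUnknown.
        return ole.CreateInstance(clsid, ole.IID_IUnknown)
    }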
diff --git a/vendor/github.com/go-ole/go-ole/variables.go b/vendor/github.com/go-ole/go-ole/variables.go
new file mode 100644
index 000000000..ebe00f1cf
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variables.go
@@ -0,0 +1,16 @@
+// +build windows
+
+package ole
+
+import (
+ "syscall"
+)
+
+var (
+ modcombase = syscall.NewLazyDLL("combase.dll")
+ modkernel32, _ = syscall.LoadDLL("kernel32.dll")
+ modole32, _ = syscall.LoadDLL("ole32.dll")
+ modoleaut32, _ = syscall.LoadDLL("oleaut32.dll")
+ modmsvcrt, _ = syscall.LoadDLL("msvcrt.dll")
+ moduser32, _ = syscall.LoadDLL("user32.dll")
+)
diff --git a/vendor/github.com/go-ole/go-ole/variant.go b/vendor/github.com/go-ole/go-ole/variant.go
new file mode 100644
index 000000000..36969725e
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant.go
@@ -0,0 +1,105 @@
+package ole
+
+import "unsafe"
+
+// NewVariant returns new variant based on type and value.
+func NewVariant(vt VT, val int64) VARIANT {
+ return VARIANT{VT: vt, Val: val}
+}
+
+// ToIUnknown converts Variant to Unknown object.
+func (v *VARIANT) ToIUnknown() *IUnknown {
+ if v.VT != VT_UNKNOWN {
+ return nil
+ }
+ return (*IUnknown)(unsafe.Pointer(uintptr(v.Val)))
+}
+
+// ToIDispatch converts variant to dispatch object.
+func (v *VARIANT) ToIDispatch() *IDispatch {
+ if v.VT != VT_DISPATCH {
+ return nil
+ }
+ return (*IDispatch)(unsafe.Pointer(uintptr(v.Val)))
+}
+
+// ToArray converts variant to SafeArray helper.
+func (v *VARIANT) ToArray() *SafeArrayConversion {
+ if v.VT != VT_SAFEARRAY {
+ if v.VT&VT_ARRAY == 0 {
+ return nil
+ }
+ }
+ var safeArray *SafeArray = (*SafeArray)(unsafe.Pointer(uintptr(v.Val)))
+ return &SafeArrayConversion{safeArray}
+}
+
+// ToString converts variant to Go string.
+func (v *VARIANT) ToString() string {
+ if v.VT != VT_BSTR {
+ return ""
+ }
+ return BstrToString(*(**uint16)(unsafe.Pointer(&v.Val)))
+}
+
+// Clear the memory of variant object.
+func (v *VARIANT) Clear() error {
+ return VariantClear(v)
+}
+
+// Value returns variant value based on its type.
+//
+// Supported types include signed and unsigned integers (1, 2, 4 and 8 byte),
+// floats, strings, bools, dates, and IUnknown/IDispatch pointers, as handled
+// in the switch below; unsupported variant types yield nil.
+//
+// The result is returned as an interface{} and usually needs a further type
+// assertion by the caller.
+func (v *VARIANT) Value() interface{} {
+ switch v.VT {
+ case VT_I1:
+ return int8(v.Val)
+ case VT_UI1:
+ return uint8(v.Val)
+ case VT_I2:
+ return int16(v.Val)
+ case VT_UI2:
+ return uint16(v.Val)
+ case VT_I4:
+ return int32(v.Val)
+ case VT_UI4:
+ return uint32(v.Val)
+ case VT_I8:
+ return int64(v.Val)
+ case VT_UI8:
+ return uint64(v.Val)
+ case VT_INT:
+ return int(v.Val)
+ case VT_UINT:
+ return uint(v.Val)
+ case VT_INT_PTR:
+ return uintptr(v.Val) // TODO
+ case VT_UINT_PTR:
+ return uintptr(v.Val)
+ case VT_R4:
+ return *(*float32)(unsafe.Pointer(&v.Val))
+ case VT_R8:
+ return *(*float64)(unsafe.Pointer(&v.Val))
+ case VT_BSTR:
+ return v.ToString()
+ case VT_DATE:
+ // VT_DATE will return either a float64 or a time.Time.
+ d := float64(v.Val)
+ date, err := GetVariantDate(d)
+ if err != nil {
+ return d
+ }
+ return date
+ case VT_UNKNOWN:
+ return v.ToIUnknown()
+ case VT_DISPATCH:
+ return v.ToIDispatch()
+ case VT_BOOL:
+ return v.Val != 0
+ }
+ return nil
+}
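
Value() returns an interface{}, so callers normally follow it with a type switch. A hedged sketch (the property name and the expected types are assumptions):

    package sample

    import (
        "fmt"
        "time"

        ole "github.com/go-ole/go-ole"
        "github.com/go-ole/go-ole/oleutil"
    )

    // describe prints a property value according to its dynamic VARIANT type.
    func describe(disp *ole.IDispatch, property string) error {
        v, err := oleutil.GetProperty(disp, property)
        if err != nil {
            return err
        }
        defer v.Clear()

        switch val := v.Value().(type) {
        case string:
            fmt.Println("string:", val)
        case int32, int64, uint32, uint64:
            fmt.Println("integer:", val)
        case bool:
            fmt.Println("bool:", val)
        case time.Time:
            fmt.Println("date:", val.Format(time.RFC3339))
        case *ole.IDispatch:
            fmt.Printf("nested dispatch object at %p\n", val)
        default:
            fmt.Printf("unhandled variant type %s\n", v.VT)
        }
        return nil
    }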
diff --git a/vendor/github.com/go-ole/go-ole/variant_386.go b/vendor/github.com/go-ole/go-ole/variant_386.go
new file mode 100644
index 000000000..e73736bf3
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant_386.go
@@ -0,0 +1,11 @@
+// +build 386
+
+package ole
+
+type VARIANT struct {
+ VT VT // 2
+ wReserved1 uint16 // 4
+ wReserved2 uint16 // 6
+ wReserved3 uint16 // 8
+ Val int64 // 16
+}
diff --git a/vendor/github.com/go-ole/go-ole/variant_amd64.go b/vendor/github.com/go-ole/go-ole/variant_amd64.go
new file mode 100644
index 000000000..dccdde132
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant_amd64.go
@@ -0,0 +1,12 @@
+// +build amd64
+
+package ole
+
+type VARIANT struct {
+ VT VT // 2
+ wReserved1 uint16 // 4
+ wReserved2 uint16 // 6
+ wReserved3 uint16 // 8
+ Val int64 // 16
+ _ [8]byte // 24
+}
diff --git a/vendor/github.com/go-ole/go-ole/vt_string.go b/vendor/github.com/go-ole/go-ole/vt_string.go
new file mode 100644
index 000000000..729b4a04d
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/vt_string.go
@@ -0,0 +1,58 @@
+// generated by stringer -output vt_string.go -type VT; DO NOT EDIT
+
+package ole
+
+import "fmt"
+
+const (
+ _VT_name_0 = "VT_EMPTYVT_NULLVT_I2VT_I4VT_R4VT_R8VT_CYVT_DATEVT_BSTRVT_DISPATCHVT_ERRORVT_BOOLVT_VARIANTVT_UNKNOWNVT_DECIMAL"
+ _VT_name_1 = "VT_I1VT_UI1VT_UI2VT_UI4VT_I8VT_UI8VT_INTVT_UINTVT_VOIDVT_HRESULTVT_PTRVT_SAFEARRAYVT_CARRAYVT_USERDEFINEDVT_LPSTRVT_LPWSTR"
+ _VT_name_2 = "VT_RECORDVT_INT_PTRVT_UINT_PTR"
+ _VT_name_3 = "VT_FILETIMEVT_BLOBVT_STREAMVT_STORAGEVT_STREAMED_OBJECTVT_STORED_OBJECTVT_BLOB_OBJECTVT_CFVT_CLSID"
+ _VT_name_4 = "VT_BSTR_BLOBVT_VECTOR"
+ _VT_name_5 = "VT_ARRAY"
+ _VT_name_6 = "VT_BYREF"
+ _VT_name_7 = "VT_RESERVED"
+ _VT_name_8 = "VT_ILLEGAL"
+)
+
+var (
+ _VT_index_0 = [...]uint8{0, 8, 15, 20, 25, 30, 35, 40, 47, 54, 65, 73, 80, 90, 100, 110}
+ _VT_index_1 = [...]uint8{0, 5, 11, 17, 23, 28, 34, 40, 47, 54, 64, 70, 82, 91, 105, 113, 122}
+ _VT_index_2 = [...]uint8{0, 9, 19, 30}
+ _VT_index_3 = [...]uint8{0, 11, 18, 27, 37, 55, 71, 85, 90, 98}
+ _VT_index_4 = [...]uint8{0, 12, 21}
+ _VT_index_5 = [...]uint8{0, 8}
+ _VT_index_6 = [...]uint8{0, 8}
+ _VT_index_7 = [...]uint8{0, 11}
+ _VT_index_8 = [...]uint8{0, 10}
+)
+
+func (i VT) String() string {
+ switch {
+ case 0 <= i && i <= 14:
+ return _VT_name_0[_VT_index_0[i]:_VT_index_0[i+1]]
+ case 16 <= i && i <= 31:
+ i -= 16
+ return _VT_name_1[_VT_index_1[i]:_VT_index_1[i+1]]
+ case 36 <= i && i <= 38:
+ i -= 36
+ return _VT_name_2[_VT_index_2[i]:_VT_index_2[i+1]]
+ case 64 <= i && i <= 72:
+ i -= 64
+ return _VT_name_3[_VT_index_3[i]:_VT_index_3[i+1]]
+ case 4095 <= i && i <= 4096:
+ i -= 4095
+ return _VT_name_4[_VT_index_4[i]:_VT_index_4[i+1]]
+ case i == 8192:
+ return _VT_name_5
+ case i == 16384:
+ return _VT_name_6
+ case i == 32768:
+ return _VT_name_7
+ case i == 65535:
+ return _VT_name_8
+ default:
+ return fmt.Sprintf("VT(%d)", i)
+ }
+}
diff --git a/vendor/github.com/go-ole/go-ole/winrt.go b/vendor/github.com/go-ole/go-ole/winrt.go
new file mode 100644
index 000000000..4e9eca732
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/winrt.go
@@ -0,0 +1,99 @@
+// +build windows
+
+package ole
+
+import (
+ "reflect"
+ "syscall"
+ "unicode/utf8"
+ "unsafe"
+)
+
+var (
+ procRoInitialize = modcombase.NewProc("RoInitialize")
+ procRoActivateInstance = modcombase.NewProc("RoActivateInstance")
+ procRoGetActivationFactory = modcombase.NewProc("RoGetActivationFactory")
+ procWindowsCreateString = modcombase.NewProc("WindowsCreateString")
+ procWindowsDeleteString = modcombase.NewProc("WindowsDeleteString")
+ procWindowsGetStringRawBuffer = modcombase.NewProc("WindowsGetStringRawBuffer")
+)
+
+func RoInitialize(thread_type uint32) (err error) {
+ hr, _, _ := procRoInitialize.Call(uintptr(thread_type))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func RoActivateInstance(clsid string) (ins *IInspectable, err error) {
+ hClsid, err := NewHString(clsid)
+ if err != nil {
+ return nil, err
+ }
+ defer DeleteHString(hClsid)
+
+ hr, _, _ := procRoActivateInstance.Call(
+ uintptr(unsafe.Pointer(hClsid)),
+ uintptr(unsafe.Pointer(&ins)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) {
+ hClsid, err := NewHString(clsid)
+ if err != nil {
+ return nil, err
+ }
+ defer DeleteHString(hClsid)
+
+ hr, _, _ := procRoGetActivationFactory.Call(
+ uintptr(unsafe.Pointer(hClsid)),
+ uintptr(unsafe.Pointer(iid)),
+ uintptr(unsafe.Pointer(&ins)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// HString is a handle to a WinRT string.
+type HString uintptr
+
+// NewHString returns a new HString for Go string.
+func NewHString(s string) (hstring HString, err error) {
+ u16 := syscall.StringToUTF16Ptr(s)
+ len := uint32(utf8.RuneCountInString(s))
+ hr, _, _ := procWindowsCreateString.Call(
+ uintptr(unsafe.Pointer(u16)),
+ uintptr(len),
+ uintptr(unsafe.Pointer(&hstring)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// DeleteHString deletes HString.
+func DeleteHString(hstring HString) (err error) {
+ hr, _, _ := procWindowsDeleteString.Call(uintptr(hstring))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// String returns Go string value of HString.
+func (h HString) String() string {
+ var u16buf uintptr
+ var u16len uint32
+ u16buf, _, _ = procWindowsGetStringRawBuffer.Call(
+ uintptr(h),
+ uintptr(unsafe.Pointer(&u16len)))
+
+ u16hdr := reflect.SliceHeader{Data: u16buf, Len: int(u16len), Cap: int(u16len)}
+ u16 := *(*[]uint16)(unsafe.Pointer(&u16hdr))
+ return syscall.UTF16ToString(u16)
+}
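
A hedged, Windows-only sketch of the HString lifecycle; the argument 1 to RoInitialize is assumed here to correspond to RO_INIT_MULTITHREADED, and the string literal is arbitrary:

    package sample

    import (
        "fmt"

        ole "github.com/go-ole/go-ole"
    )

    // roundTripHString creates a WinRT string handle, reads it back, and frees it.
    func roundTripHString() error {
        if err := ole.RoInitialize(1); err != nil { // 1: multithreaded apartment (assumption)
            return err
        }

        hs, err := ole.NewHString("nomad")
        if err != nil {
            return err
        }
        defer ole.DeleteHString(hs)

        fmt.Println(hs.String()) // prints "nomad"
        return nil
    }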
diff --git a/vendor/github.com/go-ole/go-ole/winrt_doc.go b/vendor/github.com/go-ole/go-ole/winrt_doc.go
new file mode 100644
index 000000000..52e6d74c9
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/winrt_doc.go
@@ -0,0 +1,36 @@
+// +build !windows
+
+package ole
+
+// RoInitialize
+func RoInitialize(thread_type uint32) (err error) {
+ return NewError(E_NOTIMPL)
+}
+
+// RoActivateInstance
+func RoActivateInstance(clsid string) (ins *IInspectable, err error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// RoGetActivationFactory
+func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// HString is a handle to a WinRT string.
+type HString uintptr
+
+// NewHString returns a new HString for Go string.
+func NewHString(s string) (hstring HString, err error) {
+ return HString(uintptr(0)), NewError(E_NOTIMPL)
+}
+
+// DeleteHString deletes HString.
+func DeleteHString(hstring HString) (err error) {
+ return NewError(E_NOTIMPL)
+}
+
+// String returns Go string value of HString.
+func (h HString) String() string {
+ return ""
+}
diff --git a/vendor/github.com/gorhill/cronexpr/APLv2 b/vendor/github.com/gorhill/cronexpr/APLv2
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/gorhill/cronexpr/APLv2
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/gorhill/cronexpr/GPLv3 b/vendor/github.com/gorhill/cronexpr/GPLv3
new file mode 100644
index 000000000..c13fcfaf1
--- /dev/null
+++ b/vendor/github.com/gorhill/cronexpr/GPLv3
@@ -0,0 +1,674 @@
+GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. {http://fsf.org/}
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ {one line to give the program's name and a brief idea of what it does.}
+ Copyright (C) {year} {name of author}
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see {http://www.gnu.org/licenses/}.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ cronexpr Copyright (C) 2013 Raymond Hill
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+{http://www.gnu.org/licenses/}.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+{http://www.gnu.org/philosophy/why-not-lgpl.html}.
diff --git a/vendor/github.com/gorhill/cronexpr/README.md b/vendor/github.com/gorhill/cronexpr/README.md
new file mode 100644
index 000000000..e8c56d29d
--- /dev/null
+++ b/vendor/github.com/gorhill/cronexpr/README.md
@@ -0,0 +1,134 @@
+Golang Cron expression parser
+=============================
+Given a cron expression and a time stamp, you can get the next time stamp which satisfies the cron expression.
+
+In another project, I decided to use cron expression syntax to encode scheduling information. Hence this standalone library to parse cron expressions and compute the time stamps that satisfy them.
+
+The time-matching algorithm in this implementation is efficient: as far as possible, it avoids guessing at the next matching time stamp, a technique commonly seen in a number of other implementations.
+
+There is also a companion command-line utility to evaluate cron time expressions: (which of course uses this library).
+
+Implementation
+--------------
+The reference documentation for this implementation is found at
+, which I copy/pasted here (laziness!) with modifications where this implementation differs:
+
+ Field name Mandatory? Allowed values Allowed special characters
+ ---------- ---------- -------------- --------------------------
+ Seconds No 0-59 * / , -
+ Minutes Yes 0-59 * / , -
+ Hours Yes 0-23 * / , -
+ Day of month Yes 1-31 * / , - L W
+ Month Yes 1-12 or JAN-DEC * / , -
+ Day of week Yes 0-6 or SUN-SAT * / , - L #
+ Year No 1970–2099 * / , -
+
+#### Asterisk ( * )
+The asterisk indicates that the cron expression matches for all values of the field. E.g., using an asterisk in the 4th field (month) indicates every month.
+
+#### Slash ( / )
+Slashes describe increments of ranges. For example `3-59/15` in the minute field indicates the third minute of the hour and every 15 minutes thereafter. The form `*/...` is equivalent to the form "first-last/...", that is, an increment over the largest possible range of the field.
+
+#### Comma ( , )
+Commas are used to separate items of a list. For example, using `MON,WED,FRI` in the 5th field (day of week) means Mondays, Wednesdays and Fridays.
+
+#### Hyphen ( - )
+Hyphens define ranges. For example, 2000-2010 indicates every year between 2000 and 2010 AD, inclusive.
+
+#### L
+`L` stands for "last". When used in the day-of-week field, it allows you to specify constructs such as "the last Friday" (`5L`) of a given month. In the day-of-month field, it specifies the last day of the month.
+
+#### W
+The `W` character is allowed for the day-of-month field. This character is used to specify the business day (Monday-Friday) nearest the given day. As an example, if you were to specify `15W` as the value for the day-of-month field, the meaning is: "the nearest business day to the 15th of the month."
+
+So, if the 15th is a Saturday, the trigger fires on Friday the 14th. If the 15th is a Sunday, the trigger fires on Monday the 16th. If the 15th is a Tuesday, then it fires on Tuesday the 15th. However if you specify `1W` as the value for day-of-month, and the 1st is a Saturday, the trigger fires on Monday the 3rd, as it does not 'jump' over the boundary of a month's days.
+
+The `W` character can be specified only when the day-of-month is a single day, not a range or list of days.
+
+The `W` character can also be combined with `L`, i.e. `LW` to mean "the last business day of the month."
+
+#### Hash ( # )
+`#` is allowed for the day-of-week field, and must be followed by a number between one and five. It allows you to specify constructs such as "the second Friday" of a given month.
+
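To make the special characters above concrete, here is a minimal sketch using this library's `MustParse` and `Next` (introduced in the Usage section below). The expressions are illustrative examples of mine, not taken from the vendored README:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/gorhill/cronexpr"
    )

    func main() {
    	now := time.Now()

    	// "L" in the day-of-month field: 12:00 on the last day of every month.
    	fmt.Println(cronexpr.MustParse("0 12 L * *").Next(now))

    	// "#" in the day-of-week field: 09:00 on the second Friday of every month.
    	fmt.Println(cronexpr.MustParse("0 9 * * 5#2").Next(now))
    }
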
+Predefined cron expressions
+---------------------------
+(Copied from , with text modified according to this implementation)
+
+ Entry Description Equivalent to
+ @annually Run once a year at midnight in the morning of January 1 0 0 0 1 1 * *
+ @yearly Run once a year at midnight in the morning of January 1 0 0 0 1 1 * *
+ @monthly Run once a month at midnight in the morning of the first of the month 0 0 0 1 * * *
+ @weekly Run once a week at midnight in the morning of Sunday 0 0 0 * * 0 *
+ @daily Run once a day at midnight 0 0 0 * * * *
+ @hourly Run once an hour at the beginning of the hour 0 0 * * * * *
+ @reboot Not supported
+
+Other details
+-------------
+* If only six fields are present, a `0` second field is prepended, that is, `* * * * * 2013` internally becomes `0 * * * * * 2013`.
+* If only five fields are present, a `0` second field is prepended and a wildcard year field is appended, that is, `* * * * Mon` internally becomes `0 * * * * Mon *` (see the sketch after this list).
+* Domain for the day-of-week field is [0-7] instead of [0-6], 7 being Sunday (like 0). This is to comply with http://linux.die.net/man/5/crontab#.
+* As of now, the behavior of the code is undetermined if a malformed cron expression is supplied.
+
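A small sketch of my own illustrating the field normalization described in the list above (the equivalence should hold for any starting time):

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/gorhill/cronexpr"
    )

    func main() {
    	from := time.Date(2013, time.August, 29, 9, 28, 0, 0, time.UTC)

    	// Five fields: a "0" second field is prepended and a "*" year field appended.
    	five := cronexpr.MustParse("* * * * Mon")
    	// The fully spelled-out seven-field equivalent.
    	seven := cronexpr.MustParse("0 * * * * Mon *")

    	fmt.Println(five.Next(from).Equal(seven.Next(from))) // expected: true
    }
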
+Install
+-------
+ go get github.com/gorhill/cronexpr
+
+Usage
+-----
+Import the library:
+
+ import "github.com/gorhill/cronexpr"
+ import "time"
+
+Simplest way:
+
+ nextTime := cronexpr.MustParse("0 0 29 2 *").Next(time.Now())
+
+Assuming `time.Now()` is "2013-08-29 09:28:00", then `nextTime` will be "2016-02-29 00:00:00".
+
+You can keep the returned Expression pointer around if you want to reuse it:
+
+ expr := cronexpr.MustParse("0 0 29 2 *")
+ nextTime := expr.Next(time.Now())
+ ...
+ nextTime = expr.Next(nextTime)
+
+Use `time.IsZero()` to find out whether a valid time was returned. For example,
+
+ cronexpr.MustParse("* * * * * 1980").Next(time.Now()).IsZero()
+
+will return `true`, whereas
+
+ cronexpr.MustParse("* * * * * 2050").Next(time.Now()).IsZero()
+
+will return `false` (as of 2013-08-29...)
+
+You may also query for `n` next time stamps:
+
+ cronexpr.MustParse("0 0 29 2 *").NextN(time.Now(), 5)
+
+which returns a slice of time.Time objects, containing the following time stamps (as of 2013-08-30):
+
+ 2016-02-29 00:00:00
+ 2020-02-29 00:00:00
+ 2024-02-29 00:00:00
+ 2028-02-29 00:00:00
+ 2032-02-29 00:00:00
+
+The time zone of time values returned by `Next` and `NextN` is always the
+time zone of the time value passed as argument, unless a zero time value is
+returned.
+
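For callers that prefer not to panic on a malformed expression, the library also exposes an error-returning `Parse` function (defined in cronexpr.go, shown later in this diff). A minimal sketch of my own:

    package main

    import (
    	"fmt"
    	"log"
    	"time"

    	"github.com/gorhill/cronexpr"
    )

    func main() {
    	expr, err := cronexpr.Parse("0 0 29 2 *")
    	if err != nil {
    		log.Fatalf("invalid cron expression: %v", err)
    	}
    	for _, t := range expr.NextN(time.Now(), 3) {
    		fmt.Println(t)
    	}
    }
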
+API
+---
+
+
+License
+-------
+
+License: pick the one which suits you best:
+
+- GPL v3 see
+- APL v2 see
+
diff --git a/vendor/github.com/gorhill/cronexpr/cronexpr.go b/vendor/github.com/gorhill/cronexpr/cronexpr.go
new file mode 100644
index 000000000..58b518fa5
--- /dev/null
+++ b/vendor/github.com/gorhill/cronexpr/cronexpr.go
@@ -0,0 +1,266 @@
+/*!
+ * Copyright 2013 Raymond Hill
+ *
+ * Project: github.com/gorhill/cronexpr
+ * File: cronexpr.go
+ * Version: 1.0
+ * License: pick the one which suits you :
+ * GPL v3 see
+ * APL v2 see
+ *
+ */
+
+// Package cronexpr parses cron time expressions.
+package cronexpr
+
+/******************************************************************************/
+
+import (
+ "fmt"
+ "sort"
+ "time"
+)
+
+/******************************************************************************/
+
+// An Expression represents a specific cron time expression as defined at
+//
+type Expression struct {
+ expression string
+ secondList []int
+ minuteList []int
+ hourList []int
+ daysOfMonth map[int]bool
+ workdaysOfMonth map[int]bool
+ lastDayOfMonth bool
+ lastWorkdayOfMonth bool
+ daysOfMonthRestricted bool
+ actualDaysOfMonthList []int
+ monthList []int
+ daysOfWeek map[int]bool
+ specificWeekDaysOfWeek map[int]bool
+ lastWeekDaysOfWeek map[int]bool
+ daysOfWeekRestricted bool
+ yearList []int
+}
+
+/******************************************************************************/
+
+// MustParse returns a new Expression pointer. It expects a well-formed cron
+// expression. If a malformed cron expression is supplied, it will `panic`.
+// See for documentation
+// about what is a well-formed cron expression from this library's point of
+// view.
+func MustParse(cronLine string) *Expression {
+ expr, err := Parse(cronLine)
+ if err != nil {
+ panic(err)
+ }
+ return expr
+}
+
+/******************************************************************************/
+
+// Parse returns a new Expression pointer. An error is returned if a malformed
+// cron expression is supplied.
+// See for documentation
+// about what is a well-formed cron expression from this library's point of
+// view.
+func Parse(cronLine string) (*Expression, error) {
+
+ // Maybe one of the built-in aliases is being used
+ cron := cronNormalizer.Replace(cronLine)
+
+ indices := fieldFinder.FindAllStringIndex(cron, -1)
+ fieldCount := len(indices)
+ if fieldCount < 5 {
+ return nil, fmt.Errorf("missing field(s)")
+ }
+ // ignore fields beyond 7th
+ if fieldCount > 7 {
+ fieldCount = 7
+ }
+
+ var expr = Expression{}
+ var field = 0
+ var err error
+
+ // second field (optional)
+ if fieldCount == 7 {
+ err = expr.secondFieldHandler(cron[indices[field][0]:indices[field][1]])
+ if err != nil {
+ return nil, err
+ }
+ field += 1
+ } else {
+ expr.secondList = []int{0}
+ }
+
+ // minute field
+ err = expr.minuteFieldHandler(cron[indices[field][0]:indices[field][1]])
+ if err != nil {
+ return nil, err
+ }
+ field += 1
+
+ // hour field
+ err = expr.hourFieldHandler(cron[indices[field][0]:indices[field][1]])
+ if err != nil {
+ return nil, err
+ }
+ field += 1
+
+ // day of month field
+ err = expr.domFieldHandler(cron[indices[field][0]:indices[field][1]])
+ if err != nil {
+ return nil, err
+ }
+ field += 1
+
+ // month field
+ err = expr.monthFieldHandler(cron[indices[field][0]:indices[field][1]])
+ if err != nil {
+ return nil, err
+ }
+ field += 1
+
+ // day of week field
+ err = expr.dowFieldHandler(cron[indices[field][0]:indices[field][1]])
+ if err != nil {
+ return nil, err
+ }
+ field += 1
+
+ // year field
+ if field < fieldCount {
+ err = expr.yearFieldHandler(cron[indices[field][0]:indices[field][1]])
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ expr.yearList = yearDescriptor.defaultList
+ }
+
+ return &expr, nil
+}
+
+/******************************************************************************/
+
+// Next returns the closest time instant immediately following `fromTime` which
+// matches the cron expression `expr`.
+//
+// The `time.Location` of the returned time instant is the same as that of
+// `fromTime`.
+//
+// The zero value of time.Time is returned if no matching time instant exists
+// or if a `fromTime` is itself a zero value.
+func (expr *Expression) Next(fromTime time.Time) time.Time {
+ // Special case
+ if fromTime.IsZero() {
+ return fromTime
+ }
+
+	// Since expr.nextSecond()-expr.nextMonth() expect that the
+	// supplied time stamp is a perfect match to the underlying cron
+	// expression, and since this function is an entry point where `fromTime`
+	// does not necessarily match the underlying cron expression,
+	// we first need to ensure the supplied time stamp matches
+	// the cron expression. If it does not, the supplied time
+	// stamp falls in between matching time stamps, so we move
+	// to the closest future match as soon as a mismatch is
+	// encountered.
+
+ // year
+ v := fromTime.Year()
+ i := sort.SearchInts(expr.yearList, v)
+ if i == len(expr.yearList) {
+ return time.Time{}
+ }
+ if v != expr.yearList[i] {
+ return expr.nextYear(fromTime)
+ }
+ // month
+ v = int(fromTime.Month())
+ i = sort.SearchInts(expr.monthList, v)
+ if i == len(expr.monthList) {
+ return expr.nextYear(fromTime)
+ }
+ if v != expr.monthList[i] {
+ return expr.nextMonth(fromTime)
+ }
+
+ expr.actualDaysOfMonthList = expr.calculateActualDaysOfMonth(fromTime.Year(), int(fromTime.Month()))
+ if len(expr.actualDaysOfMonthList) == 0 {
+ return expr.nextMonth(fromTime)
+ }
+
+ // day of month
+ v = fromTime.Day()
+ i = sort.SearchInts(expr.actualDaysOfMonthList, v)
+ if i == len(expr.actualDaysOfMonthList) {
+ return expr.nextMonth(fromTime)
+ }
+ if v != expr.actualDaysOfMonthList[i] {
+ return expr.nextDayOfMonth(fromTime)
+ }
+ // hour
+ v = fromTime.Hour()
+ i = sort.SearchInts(expr.hourList, v)
+ if i == len(expr.hourList) {
+ return expr.nextDayOfMonth(fromTime)
+ }
+ if v != expr.hourList[i] {
+ return expr.nextHour(fromTime)
+ }
+ // minute
+ v = fromTime.Minute()
+ i = sort.SearchInts(expr.minuteList, v)
+ if i == len(expr.minuteList) {
+ return expr.nextHour(fromTime)
+ }
+ if v != expr.minuteList[i] {
+ return expr.nextMinute(fromTime)
+ }
+ // second
+ v = fromTime.Second()
+ i = sort.SearchInts(expr.secondList, v)
+ if i == len(expr.secondList) {
+ return expr.nextMinute(fromTime)
+ }
+
+ // If we reach this point, there is nothing better to do
+ // than to move to the next second
+
+ return expr.nextSecond(fromTime)
+}
+
+/******************************************************************************/
+
+// NextN returns a slice of `n` closest time instants immediately following
+// `fromTime` which match the cron expression `expr`.
+//
+// The time instants in the returned slice are in chronological ascending order.
+// The `time.Location` of the returned time instants is the same as that of
+// `fromTime`.
+//
+// A slice with len in the range [0, `n`] is returned; that is, if not enough
+// matching time instants exist, the number of returned entries will be less
+// than `n`.
+func (expr *Expression) NextN(fromTime time.Time, n uint) []time.Time {
+ nextTimes := make([]time.Time, 0, n)
+ if n > 0 {
+ fromTime = expr.Next(fromTime)
+ for {
+ if fromTime.IsZero() {
+ break
+ }
+ nextTimes = append(nextTimes, fromTime)
+ n -= 1
+ if n == 0 {
+ break
+ }
+ fromTime = expr.nextSecond(fromTime)
+ }
+ }
+ return nextTimes
+}
diff --git a/vendor/github.com/gorhill/cronexpr/cronexpr_next.go b/vendor/github.com/gorhill/cronexpr/cronexpr_next.go
new file mode 100644
index 000000000..a0ebdb6b2
--- /dev/null
+++ b/vendor/github.com/gorhill/cronexpr/cronexpr_next.go
@@ -0,0 +1,292 @@
+/*!
+ * Copyright 2013 Raymond Hill
+ *
+ * Project: github.com/gorhill/cronexpr
+ * File: cronexpr_next.go
+ * Version: 1.0
+ * License: pick the one which suits you :
+ * GPL v3 see
+ * APL v2 see
+ *
+ */
+
+package cronexpr
+
+/******************************************************************************/
+
+import (
+ "sort"
+ "time"
+)
+
+/******************************************************************************/
+
+var dowNormalizedOffsets = [][]int{
+ {1, 8, 15, 22, 29},
+ {2, 9, 16, 23, 30},
+ {3, 10, 17, 24, 31},
+ {4, 11, 18, 25},
+ {5, 12, 19, 26},
+ {6, 13, 20, 27},
+ {7, 14, 21, 28},
+}
+
+/******************************************************************************/
+
+func (expr *Expression) nextYear(t time.Time) time.Time {
+ // Find index at which item in list is greater or equal to
+ // candidate year
+ i := sort.SearchInts(expr.yearList, t.Year()+1)
+ if i == len(expr.yearList) {
+ return time.Time{}
+ }
+ // Year changed, need to recalculate actual days of month
+ expr.actualDaysOfMonthList = expr.calculateActualDaysOfMonth(expr.yearList[i], expr.monthList[0])
+ if len(expr.actualDaysOfMonthList) == 0 {
+ return expr.nextMonth(time.Date(
+ expr.yearList[i],
+ time.Month(expr.monthList[0]),
+ 1,
+ expr.hourList[0],
+ expr.minuteList[0],
+ expr.secondList[0],
+ 0,
+ t.Location()))
+ }
+ return time.Date(
+ expr.yearList[i],
+ time.Month(expr.monthList[0]),
+ expr.actualDaysOfMonthList[0],
+ expr.hourList[0],
+ expr.minuteList[0],
+ expr.secondList[0],
+ 0,
+ t.Location())
+}
+
+/******************************************************************************/
+
+func (expr *Expression) nextMonth(t time.Time) time.Time {
+ // Find index at which item in list is greater or equal to
+ // candidate month
+ i := sort.SearchInts(expr.monthList, int(t.Month())+1)
+ if i == len(expr.monthList) {
+ return expr.nextYear(t)
+ }
+ // Month changed, need to recalculate actual days of month
+ expr.actualDaysOfMonthList = expr.calculateActualDaysOfMonth(t.Year(), expr.monthList[i])
+ if len(expr.actualDaysOfMonthList) == 0 {
+ return expr.nextMonth(time.Date(
+ t.Year(),
+ time.Month(expr.monthList[i]),
+ 1,
+ expr.hourList[0],
+ expr.minuteList[0],
+ expr.secondList[0],
+ 0,
+ t.Location()))
+ }
+
+ return time.Date(
+ t.Year(),
+ time.Month(expr.monthList[i]),
+ expr.actualDaysOfMonthList[0],
+ expr.hourList[0],
+ expr.minuteList[0],
+ expr.secondList[0],
+ 0,
+ t.Location())
+}
+
+/******************************************************************************/
+
+func (expr *Expression) nextDayOfMonth(t time.Time) time.Time {
+ // Find index at which item in list is greater or equal to
+ // candidate day of month
+ i := sort.SearchInts(expr.actualDaysOfMonthList, t.Day()+1)
+ if i == len(expr.actualDaysOfMonthList) {
+ return expr.nextMonth(t)
+ }
+
+ return time.Date(
+ t.Year(),
+ t.Month(),
+ expr.actualDaysOfMonthList[i],
+ expr.hourList[0],
+ expr.minuteList[0],
+ expr.secondList[0],
+ 0,
+ t.Location())
+}
+
+/******************************************************************************/
+
+func (expr *Expression) nextHour(t time.Time) time.Time {
+ // Find index at which item in list is greater or equal to
+ // candidate hour
+ i := sort.SearchInts(expr.hourList, t.Hour()+1)
+ if i == len(expr.hourList) {
+ return expr.nextDayOfMonth(t)
+ }
+
+ return time.Date(
+ t.Year(),
+ t.Month(),
+ t.Day(),
+ expr.hourList[i],
+ expr.minuteList[0],
+ expr.secondList[0],
+ 0,
+ t.Location())
+}
+
+/******************************************************************************/
+
+func (expr *Expression) nextMinute(t time.Time) time.Time {
+ // Find index at which item in list is greater or equal to
+ // candidate minute
+ i := sort.SearchInts(expr.minuteList, t.Minute()+1)
+ if i == len(expr.minuteList) {
+ return expr.nextHour(t)
+ }
+
+ return time.Date(
+ t.Year(),
+ t.Month(),
+ t.Day(),
+ t.Hour(),
+ expr.minuteList[i],
+ expr.secondList[0],
+ 0,
+ t.Location())
+}
+
+/******************************************************************************/
+
+func (expr *Expression) nextSecond(t time.Time) time.Time {
+ // nextSecond() assumes all other fields are exactly matched
+ // to the cron expression
+
+ // Find index at which item in list is greater or equal to
+ // candidate second
+ i := sort.SearchInts(expr.secondList, t.Second()+1)
+ if i == len(expr.secondList) {
+ return expr.nextMinute(t)
+ }
+
+ return time.Date(
+ t.Year(),
+ t.Month(),
+ t.Day(),
+ t.Hour(),
+ t.Minute(),
+ expr.secondList[i],
+ 0,
+ t.Location())
+}
+
+/******************************************************************************/
+
+func (expr *Expression) calculateActualDaysOfMonth(year, month int) []int {
+ actualDaysOfMonthMap := make(map[int]bool)
+ firstDayOfMonth := time.Date(year, time.Month(month), 1, 0, 0, 0, 0, time.UTC)
+ lastDayOfMonth := firstDayOfMonth.AddDate(0, 1, -1)
+
+ // As per crontab man page (http://linux.die.net/man/5/crontab#):
+ // "The day of a command's execution can be specified by two
+ // "fields - day of month, and day of week. If both fields are
+ // "restricted (ie, aren't *), the command will be run when
+ // "either field matches the current time"
+
+ // If both fields are not restricted, all days of the month are a hit
+ if expr.daysOfMonthRestricted == false && expr.daysOfWeekRestricted == false {
+ return genericDefaultList[1 : lastDayOfMonth.Day()+1]
+ }
+
+ // day-of-month != `*`
+ if expr.daysOfMonthRestricted {
+ // Last day of month
+ if expr.lastDayOfMonth {
+ actualDaysOfMonthMap[lastDayOfMonth.Day()] = true
+ }
+ // Last work day of month
+ if expr.lastWorkdayOfMonth {
+ actualDaysOfMonthMap[workdayOfMonth(lastDayOfMonth, lastDayOfMonth)] = true
+ }
+ // Days of month
+ for v := range expr.daysOfMonth {
+ // Ignore days beyond end of month
+ if v <= lastDayOfMonth.Day() {
+ actualDaysOfMonthMap[v] = true
+ }
+ }
+ // Work days of month
+ // As per Wikipedia: month boundaries are not crossed.
+ for v := range expr.workdaysOfMonth {
+ // Ignore days beyond end of month
+ if v <= lastDayOfMonth.Day() {
+ actualDaysOfMonthMap[workdayOfMonth(firstDayOfMonth.AddDate(0, 0, v-1), lastDayOfMonth)] = true
+ }
+ }
+ }
+
+ // day-of-week != `*`
+ if expr.daysOfWeekRestricted {
+ // How far first sunday is from first day of month
+ offset := 7 - int(firstDayOfMonth.Weekday())
+ // days of week
+ // offset : (7 - day_of_week_of_1st_day_of_month)
+ // target : 1 + (7 * week_of_month) + (offset + day_of_week) % 7
+ for v := range expr.daysOfWeek {
+ w := dowNormalizedOffsets[(offset+v)%7]
+ actualDaysOfMonthMap[w[0]] = true
+ actualDaysOfMonthMap[w[1]] = true
+ actualDaysOfMonthMap[w[2]] = true
+ actualDaysOfMonthMap[w[3]] = true
+ if len(w) > 4 && w[4] <= lastDayOfMonth.Day() {
+ actualDaysOfMonthMap[w[4]] = true
+ }
+ }
+ // days of week of specific week in the month
+ // offset : (7 - day_of_week_of_1st_day_of_month)
+ // target : 1 + (7 * week_of_month) + (offset + day_of_week) % 7
+ for v := range expr.specificWeekDaysOfWeek {
+ v = 1 + 7*(v/7) + (offset+v)%7
+ if v <= lastDayOfMonth.Day() {
+ actualDaysOfMonthMap[v] = true
+ }
+ }
+ // Last days of week of the month
+ lastWeekOrigin := firstDayOfMonth.AddDate(0, 1, -7)
+ offset = 7 - int(lastWeekOrigin.Weekday())
+ for v := range expr.lastWeekDaysOfWeek {
+ v = lastWeekOrigin.Day() + (offset+v)%7
+ if v <= lastDayOfMonth.Day() {
+ actualDaysOfMonthMap[v] = true
+ }
+ }
+ }
+
+ return toList(actualDaysOfMonthMap)
+}
+
+func workdayOfMonth(targetDom, lastDom time.Time) int {
+ // If saturday, then friday
+ // If sunday, then monday
+ dom := targetDom.Day()
+ dow := targetDom.Weekday()
+ if dow == time.Saturday {
+ if dom > 1 {
+ dom -= 1
+ } else {
+ dom += 2
+ }
+ } else if dow == time.Sunday {
+ if dom < lastDom.Day() {
+ dom += 1
+ } else {
+ dom -= 2
+ }
+ }
+ return dom
+}
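To tie `calculateActualDaysOfMonth` and `workdayOfMonth` back to the README's description of the `W` character and of the day-of-month/day-of-week rule, here is a small worked sketch of my own (it relies on June 15, 2013 having been a Saturday and June 3, 2013 a Monday):

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/gorhill/cronexpr"
    )

    func main() {
    	// "15W": June 15, 2013 was a Saturday, so workdayOfMonth resolves it to
    	// the nearest business day inside the month, Friday the 14th.
    	from := time.Date(2013, time.June, 1, 0, 0, 0, 0, time.UTC)
    	fmt.Println(cronexpr.MustParse("0 9 15W * *").Next(from))
    	// expected: 2013-06-14 09:00:00 +0000 UTC

    	// When both day-of-month and day-of-week are restricted, a time matching
    	// either field is accepted (see the crontab man page quote in
    	// calculateActualDaysOfMonth). Starting from June 2, the next hit for
    	// "0 9 1 * Mon" is Monday, June 3: a day-of-week match.
    	from2 := time.Date(2013, time.June, 2, 0, 0, 0, 0, time.UTC)
    	fmt.Println(cronexpr.MustParse("0 9 1 * Mon").Next(from2))
    	// expected: 2013-06-03 09:00:00 +0000 UTC
    }
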
diff --git a/vendor/github.com/gorhill/cronexpr/cronexpr_parse.go b/vendor/github.com/gorhill/cronexpr/cronexpr_parse.go
new file mode 100644
index 000000000..aeb82968a
--- /dev/null
+++ b/vendor/github.com/gorhill/cronexpr/cronexpr_parse.go
@@ -0,0 +1,489 @@
+/*!
+ * Copyright 2013 Raymond Hill
+ *
+ * Project: github.com/gorhill/cronexpr
+ * File: cronexpr_parse.go
+ * Version: 1.0
+ * License: pick the one which suits you best:
+ * GPL v3 see
+ * APL v2 see
+ *
+ */
+
+package cronexpr
+
+/******************************************************************************/
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+/******************************************************************************/
+
+var (
+ genericDefaultList = []int{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ }
+ yearDefaultList = []int{
+ 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979,
+ 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989,
+ 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
+ 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019,
+ 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029,
+ 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039,
+ 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049,
+ 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059,
+ 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069,
+ 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079,
+ 2080, 2081, 2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089,
+ 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099,
+ }
+)
+
+/******************************************************************************/
+
+var (
+ numberTokens = map[string]int{
+ "0": 0, "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9,
+ "00": 0, "01": 1, "02": 2, "03": 3, "04": 4, "05": 5, "06": 6, "07": 7, "08": 8, "09": 9,
+ "10": 10, "11": 11, "12": 12, "13": 13, "14": 14, "15": 15, "16": 16, "17": 17, "18": 18, "19": 19,
+ "20": 20, "21": 21, "22": 22, "23": 23, "24": 24, "25": 25, "26": 26, "27": 27, "28": 28, "29": 29,
+ "30": 30, "31": 31, "32": 32, "33": 33, "34": 34, "35": 35, "36": 36, "37": 37, "38": 38, "39": 39,
+ "40": 40, "41": 41, "42": 42, "43": 43, "44": 44, "45": 45, "46": 46, "47": 47, "48": 48, "49": 49,
+ "50": 50, "51": 51, "52": 52, "53": 53, "54": 54, "55": 55, "56": 56, "57": 57, "58": 58, "59": 59,
+ "1970": 1970, "1971": 1971, "1972": 1972, "1973": 1973, "1974": 1974, "1975": 1975, "1976": 1976, "1977": 1977, "1978": 1978, "1979": 1979,
+ "1980": 1980, "1981": 1981, "1982": 1982, "1983": 1983, "1984": 1984, "1985": 1985, "1986": 1986, "1987": 1987, "1988": 1988, "1989": 1989,
+ "1990": 1990, "1991": 1991, "1992": 1992, "1993": 1993, "1994": 1994, "1995": 1995, "1996": 1996, "1997": 1997, "1998": 1998, "1999": 1999,
+ "2000": 2000, "2001": 2001, "2002": 2002, "2003": 2003, "2004": 2004, "2005": 2005, "2006": 2006, "2007": 2007, "2008": 2008, "2009": 2009,
+ "2010": 2010, "2011": 2011, "2012": 2012, "2013": 2013, "2014": 2014, "2015": 2015, "2016": 2016, "2017": 2017, "2018": 2018, "2019": 2019,
+ "2020": 2020, "2021": 2021, "2022": 2022, "2023": 2023, "2024": 2024, "2025": 2025, "2026": 2026, "2027": 2027, "2028": 2028, "2029": 2029,
+ "2030": 2030, "2031": 2031, "2032": 2032, "2033": 2033, "2034": 2034, "2035": 2035, "2036": 2036, "2037": 2037, "2038": 2038, "2039": 2039,
+ "2040": 2040, "2041": 2041, "2042": 2042, "2043": 2043, "2044": 2044, "2045": 2045, "2046": 2046, "2047": 2047, "2048": 2048, "2049": 2049,
+ "2050": 2050, "2051": 2051, "2052": 2052, "2053": 2053, "2054": 2054, "2055": 2055, "2056": 2056, "2057": 2057, "2058": 2058, "2059": 2059,
+ "2060": 2060, "2061": 2061, "2062": 2062, "2063": 2063, "2064": 2064, "2065": 2065, "2066": 2066, "2067": 2067, "2068": 2068, "2069": 2069,
+ "2070": 2070, "2071": 2071, "2072": 2072, "2073": 2073, "2074": 2074, "2075": 2075, "2076": 2076, "2077": 2077, "2078": 2078, "2079": 2079,
+ "2080": 2080, "2081": 2081, "2082": 2082, "2083": 2083, "2084": 2084, "2085": 2085, "2086": 2086, "2087": 2087, "2088": 2088, "2089": 2089,
+ "2090": 2090, "2091": 2091, "2092": 2092, "2093": 2093, "2094": 2094, "2095": 2095, "2096": 2096, "2097": 2097, "2098": 2098, "2099": 2099,
+ }
+ monthTokens = map[string]int{
+ `1`: 1, `jan`: 1, `january`: 1,
+ `2`: 2, `feb`: 2, `february`: 2,
+ `3`: 3, `mar`: 3, `march`: 3,
+ `4`: 4, `apr`: 4, `april`: 4,
+ `5`: 5, `may`: 5,
+ `6`: 6, `jun`: 6, `june`: 6,
+ `7`: 7, `jul`: 7, `july`: 7,
+ `8`: 8, `aug`: 8, `august`: 8,
+ `9`: 9, `sep`: 9, `september`: 9,
+ `10`: 10, `oct`: 10, `october`: 10,
+ `11`: 11, `nov`: 11, `november`: 11,
+ `12`: 12, `dec`: 12, `december`: 12,
+ }
+ dowTokens = map[string]int{
+ `0`: 0, `sun`: 0, `sunday`: 0,
+ `1`: 1, `mon`: 1, `monday`: 1,
+ `2`: 2, `tue`: 2, `tuesday`: 2,
+ `3`: 3, `wed`: 3, `wednesday`: 3,
+ `4`: 4, `thu`: 4, `thursday`: 4,
+ `5`: 5, `fri`: 5, `friday`: 5,
+ `6`: 6, `sat`: 6, `saturday`: 6,
+ `7`: 0,
+ }
+)
+
+/******************************************************************************/
+
+func atoi(s string) int {
+ return numberTokens[s]
+}
+
+type fieldDescriptor struct {
+ name string
+ min, max int
+ defaultList []int
+ valuePattern string
+ atoi func(string) int
+}
+
+var (
+ secondDescriptor = fieldDescriptor{
+ name: "second",
+ min: 0,
+ max: 59,
+ defaultList: genericDefaultList[0:60],
+ valuePattern: `0?[0-9]|[1-5][0-9]`,
+ atoi: atoi,
+ }
+ minuteDescriptor = fieldDescriptor{
+ name: "minute",
+ min: 0,
+ max: 59,
+ defaultList: genericDefaultList[0:60],
+ valuePattern: `0?[0-9]|[1-5][0-9]`,
+ atoi: atoi,
+ }
+ hourDescriptor = fieldDescriptor{
+ name: "hour",
+ min: 0,
+ max: 23,
+ defaultList: genericDefaultList[0:24],
+ valuePattern: `0?[0-9]|1[0-9]|2[0-3]`,
+ atoi: atoi,
+ }
+ domDescriptor = fieldDescriptor{
+ name: "day-of-month",
+ min: 1,
+ max: 31,
+ defaultList: genericDefaultList[1:32],
+ valuePattern: `0?[1-9]|[12][0-9]|3[01]`,
+ atoi: atoi,
+ }
+ monthDescriptor = fieldDescriptor{
+ name: "month",
+ min: 1,
+ max: 12,
+ defaultList: genericDefaultList[1:13],
+ valuePattern: `0?[1-9]|1[012]|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec|january|february|march|april|may|june|july|august|september|october|november|december`,
+ atoi: func(s string) int {
+ return monthTokens[s]
+ },
+ }
+ dowDescriptor = fieldDescriptor{
+ name: "day-of-week",
+ min: 0,
+ max: 6,
+ defaultList: genericDefaultList[0:7],
+ valuePattern: `0?[0-7]|sun|mon|tue|wed|thu|fri|sat|sunday|monday|tuesday|wednesday|thursday|friday|saturday`,
+ atoi: func(s string) int {
+ return dowTokens[s]
+ },
+ }
+ yearDescriptor = fieldDescriptor{
+ name: "year",
+ min: 1970,
+ max: 2099,
+ defaultList: yearDefaultList[:],
+ valuePattern: `19[789][0-9]|20[0-9]{2}`,
+ atoi: atoi,
+ }
+)
+
+/******************************************************************************/
+
+var (
+ layoutWildcard = `^\*$|^\?$`
+ layoutValue = `^(%value%)$`
+ layoutRange = `^(%value%)-(%value%)$`
+ layoutWildcardAndInterval = `^\*/(\d+)$`
+ layoutValueAndInterval = `^(%value%)/(\d+)$`
+ layoutRangeAndInterval = `^(%value%)-(%value%)/(\d+)$`
+ layoutLastDom = `^l$`
+ layoutWorkdom = `^(%value%)w$`
+ layoutLastWorkdom = `^lw$`
+ layoutDowOfLastWeek = `^(%value%)l$`
+ layoutDowOfSpecificWeek = `^(%value%)#([1-5])$`
+ fieldFinder = regexp.MustCompile(`\S+`)
+ entryFinder = regexp.MustCompile(`[^,]+`)
+ layoutRegexp = make(map[string]*regexp.Regexp)
+)
+
+/******************************************************************************/
+
+var cronNormalizer = strings.NewReplacer(
+ "@yearly", "0 0 0 1 1 * *",
+ "@annually", "0 0 0 1 1 * *",
+ "@monthly", "0 0 0 1 * * *",
+ "@weekly", "0 0 0 * * 0 *",
+ "@daily", "0 0 0 * * * *",
+ "@hourly", "0 0 * * * * *")
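+
+// For example, cronNormalizer.Replace("@daily") yields "0 0 0 * * * *",
+// i.e. the second, minute, hour, day-of-month, month, day-of-week and
+// year fields spelled out.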
+
+/******************************************************************************/
+
+func (expr *Expression) secondFieldHandler(s string) error {
+ var err error
+ expr.secondList, err = genericFieldHandler(s, secondDescriptor)
+ return err
+}
+
+/******************************************************************************/
+
+func (expr *Expression) minuteFieldHandler(s string) error {
+ var err error
+ expr.minuteList, err = genericFieldHandler(s, minuteDescriptor)
+ return err
+}
+
+/******************************************************************************/
+
+func (expr *Expression) hourFieldHandler(s string) error {
+ var err error
+ expr.hourList, err = genericFieldHandler(s, hourDescriptor)
+ return err
+}
+
+/******************************************************************************/
+
+func (expr *Expression) monthFieldHandler(s string) error {
+ var err error
+ expr.monthList, err = genericFieldHandler(s, monthDescriptor)
+ return err
+}
+
+/******************************************************************************/
+
+func (expr *Expression) yearFieldHandler(s string) error {
+ var err error
+ expr.yearList, err = genericFieldHandler(s, yearDescriptor)
+ return err
+}
+
+/******************************************************************************/
+
+const (
+ none = 0
+ one = 1
+ span = 2
+ all = 3
+)
+
+type cronDirective struct {
+ kind int
+ first int
+ last int
+ step int
+ sbeg int
+ send int
+}
+
+func genericFieldHandler(s string, desc fieldDescriptor) ([]int, error) {
+ directives, err := genericFieldParse(s, desc)
+ if err != nil {
+ return nil, err
+ }
+ values := make(map[int]bool)
+ for _, directive := range directives {
+ switch directive.kind {
+ case none:
+ return nil, fmt.Errorf("syntax error in %s field: '%s'", desc.name, s[directive.sbeg:directive.send])
+ case one:
+ populateOne(values, directive.first)
+ case span:
+ populateMany(values, directive.first, directive.last, directive.step)
+ case all:
+ return desc.defaultList, nil
+ }
+ }
+ return toList(values), nil
+}
+
+func (expr *Expression) dowFieldHandler(s string) error {
+ expr.daysOfWeekRestricted = true
+ expr.daysOfWeek = make(map[int]bool)
+ expr.lastWeekDaysOfWeek = make(map[int]bool)
+ expr.specificWeekDaysOfWeek = make(map[int]bool)
+
+ directives, err := genericFieldParse(s, dowDescriptor)
+ if err != nil {
+ return err
+ }
+
+ for _, directive := range directives {
+ switch directive.kind {
+ case none:
+ sdirective := s[directive.sbeg:directive.send]
+ snormal := strings.ToLower(sdirective)
+ // `5L`
+ pairs := makeLayoutRegexp(layoutDowOfLastWeek, dowDescriptor.valuePattern).FindStringSubmatchIndex(snormal)
+ if len(pairs) > 0 {
+ populateOne(expr.lastWeekDaysOfWeek, dowDescriptor.atoi(snormal[pairs[2]:pairs[3]]))
+ } else {
+ // `5#3`
+ pairs := makeLayoutRegexp(layoutDowOfSpecificWeek, dowDescriptor.valuePattern).FindStringSubmatchIndex(snormal)
+ if len(pairs) > 0 {
+ populateOne(expr.specificWeekDaysOfWeek, (dowDescriptor.atoi(snormal[pairs[4]:pairs[5]])-1)*7+(dowDescriptor.atoi(snormal[pairs[2]:pairs[3]])%7))
+ } else {
+ return fmt.Errorf("syntax error in day-of-week field: '%s'", sdirective)
+ }
+ }
+ case one:
+ populateOne(expr.daysOfWeek, directive.first)
+ case span:
+ populateMany(expr.daysOfWeek, directive.first, directive.last, directive.step)
+ case all:
+ populateMany(expr.daysOfWeek, directive.first, directive.last, directive.step)
+ expr.daysOfWeekRestricted = false
+ }
+ }
+ return nil
+}
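+
+// For example, the day-of-week entry `fri#3` (third Friday) is stored in
+// specificWeekDaysOfWeek as (3-1)*7 + 5 = 19, while `5l` (last Friday of
+// the month) stores 5 in lastWeekDaysOfWeek.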
+
+func (expr *Expression) domFieldHandler(s string) error {
+ expr.daysOfMonthRestricted = true
+ expr.lastDayOfMonth = false
+ expr.lastWorkdayOfMonth = false
+ expr.daysOfMonth = make(map[int]bool) // days of month map
+ expr.workdaysOfMonth = make(map[int]bool) // work days of month map
+
+ directives, err := genericFieldParse(s, domDescriptor)
+ if err != nil {
+ return err
+ }
+
+ for _, directive := range directives {
+ switch directive.kind {
+ case none:
+ sdirective := s[directive.sbeg:directive.send]
+ snormal := strings.ToLower(sdirective)
+ // `L`
+ if makeLayoutRegexp(layoutLastDom, domDescriptor.valuePattern).MatchString(snormal) {
+ expr.lastDayOfMonth = true
+ } else {
+ // `LW`
+ if makeLayoutRegexp(layoutLastWorkdom, domDescriptor.valuePattern).MatchString(snormal) {
+ expr.lastWorkdayOfMonth = true
+ } else {
+ // `15W`
+ pairs := makeLayoutRegexp(layoutWorkdom, domDescriptor.valuePattern).FindStringSubmatchIndex(snormal)
+ if len(pairs) > 0 {
+ populateOne(expr.workdaysOfMonth, domDescriptor.atoi(snormal[pairs[2]:pairs[3]]))
+ } else {
+ return fmt.Errorf("syntax error in day-of-month field: '%s'", sdirective)
+ }
+ }
+ }
+ case one:
+ populateOne(expr.daysOfMonth, directive.first)
+ case span:
+ populateMany(expr.daysOfMonth, directive.first, directive.last, directive.step)
+ case all:
+ populateMany(expr.daysOfMonth, directive.first, directive.last, directive.step)
+ expr.daysOfMonthRestricted = false
+ }
+ }
+ return nil
+}
+
+/******************************************************************************/
+
+func populateOne(values map[int]bool, v int) {
+ values[v] = true
+}
+
+func populateMany(values map[int]bool, min, max, step int) {
+ for i := min; i <= max; i += step {
+ values[i] = true
+ }
+}
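+
+// For example, populateMany(values, 5, 20, 5) marks 5, 10, 15 and 20,
+// which is how a range-with-step entry such as `5-20/5` is expanded.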
+
+func toList(set map[int]bool) []int {
+ list := make([]int, len(set))
+ i := 0
+ for k := range set {
+ list[i] = k
+ i += 1
+ }
+ sort.Ints(list)
+ return list
+}
+
+/******************************************************************************/
+
+func genericFieldParse(s string, desc fieldDescriptor) ([]*cronDirective, error) {
+ // At least one entry must be present
+ indices := entryFinder.FindAllStringIndex(s, -1)
+ if len(indices) == 0 {
+ return nil, fmt.Errorf("%s field: missing directive", desc.name)
+ }
+
+ directives := make([]*cronDirective, 0, len(indices))
+
+ for i := range indices {
+ directive := cronDirective{
+ sbeg: indices[i][0],
+ send: indices[i][1],
+ }
+ snormal := strings.ToLower(s[indices[i][0]:indices[i][1]])
+
+ // `*`
+ if makeLayoutRegexp(layoutWildcard, desc.valuePattern).MatchString(snormal) {
+ directive.kind = all
+ directive.first = desc.min
+ directive.last = desc.max
+ directive.step = 1
+ directives = append(directives, &directive)
+ continue
+ }
+ // `5`
+ if makeLayoutRegexp(layoutValue, desc.valuePattern).MatchString(snormal) {
+ directive.kind = one
+ directive.first = desc.atoi(snormal)
+ directives = append(directives, &directive)
+ continue
+ }
+ // `5-20`
+ pairs := makeLayoutRegexp(layoutRange, desc.valuePattern).FindStringSubmatchIndex(snormal)
+ if len(pairs) > 0 {
+ directive.kind = span
+ directive.first = desc.atoi(snormal[pairs[2]:pairs[3]])
+ directive.last = desc.atoi(snormal[pairs[4]:pairs[5]])
+ directive.step = 1
+ directives = append(directives, &directive)
+ continue
+ }
+ // `*/2`
+ pairs = makeLayoutRegexp(layoutWildcardAndInterval, desc.valuePattern).FindStringSubmatchIndex(snormal)
+ if len(pairs) > 0 {
+ directive.kind = span
+ directive.first = desc.min
+ directive.last = desc.max
+ directive.step = atoi(snormal[pairs[2]:pairs[3]])
+ directives = append(directives, &directive)
+ continue
+ }
+ // `5/2`
+ pairs = makeLayoutRegexp(layoutValueAndInterval, desc.valuePattern).FindStringSubmatchIndex(snormal)
+ if len(pairs) > 0 {
+ directive.kind = span
+ directive.first = desc.atoi(snormal[pairs[2]:pairs[3]])
+ directive.last = desc.max
+ directive.step = atoi(snormal[pairs[4]:pairs[5]])
+ directives = append(directives, &directive)
+ continue
+ }
+ // `5-20/2`
+ pairs = makeLayoutRegexp(layoutRangeAndInterval, desc.valuePattern).FindStringSubmatchIndex(snormal)
+ if len(pairs) > 0 {
+ directive.kind = span
+ directive.first = desc.atoi(snormal[pairs[2]:pairs[3]])
+ directive.last = desc.atoi(snormal[pairs[4]:pairs[5]])
+ directive.step = atoi(snormal[pairs[6]:pairs[7]])
+ directives = append(directives, &directive)
+ continue
+ }
+ // No behavior for this one, let caller deal with it
+ directive.kind = none
+ directives = append(directives, &directive)
+ }
+ return directives, nil
+}
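+
+// For example, the minute field `1,30-40/2` parses into two directives:
+// a `one` directive for the value 1, and a `span` directive covering 30
+// through 40 with a step of 2.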
+
+/******************************************************************************/
+
+func makeLayoutRegexp(layout, value string) *regexp.Regexp {
+ layout = strings.Replace(layout, `%value%`, value, -1)
+ re := layoutRegexp[layout]
+ if re == nil {
+ re = regexp.MustCompile(layout)
+ layoutRegexp[layout] = re
+ }
+ return re
+}
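+
+// For example, makeLayoutRegexp(layoutRange, hourDescriptor.valuePattern)
+// compiles (and caches) `^(0?[0-9]|1[0-9]|2[0-3])-(0?[0-9]|1[0-9]|2[0-3])$`.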
diff --git a/vendor/github.com/hashicorp/consul/lib/cluster.go b/vendor/github.com/hashicorp/consul/lib/cluster.go
new file mode 100644
index 000000000..a95232c57
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/lib/cluster.go
@@ -0,0 +1,56 @@
+package lib
+
+import (
+ "math/rand"
+ "time"
+)
+
+// DurationMinusBuffer returns the interval minus the buffer, with a
+// random jitter further subtracted. It is used primarily for servicing
+// Consul TTL checks in advance of the TTL.
+func DurationMinusBuffer(intv time.Duration, buffer time.Duration, jitter int64) time.Duration {
+ d := intv - buffer
+ if jitter == 0 {
+ d -= RandomStagger(d)
+ } else {
+ d -= RandomStagger(time.Duration(int64(d) / jitter))
+ }
+ return d
+}
+
+// DurationMinusBufferDomain returns the domain of valid durations from a
+// call to DurationMinusBuffer. This function is used to check user
+// specified input values to DurationMinusBuffer.
+func DurationMinusBufferDomain(intv time.Duration, buffer time.Duration, jitter int64) (min time.Duration, max time.Duration) {
+ max = intv - buffer
+ if jitter == 0 {
+ min = max
+ } else {
+ min = max - time.Duration(int64(max)/jitter)
+ }
+ return min, max
+}
+
+// RandomStagger returns a random stagger interval between 0 and the given duration.
+func RandomStagger(intv time.Duration) time.Duration {
+ if intv == 0 {
+ return 0
+ }
+ return time.Duration(uint64(rand.Int63()) % uint64(intv))
+}
+
+// RateScaledInterval is used to choose an interval to perform an action in
+// order to target an aggregate number of actions per second across the whole
+// cluster.
+func RateScaledInterval(rate float64, min time.Duration, n int) time.Duration {
+ const minRate = 1.0 / 86400 // 1/(1 * time.Day); float literal so constant division does not truncate to zero
+ if rate <= minRate {
+ return min
+ }
+ interval := time.Duration(float64(time.Second) * float64(n) / rate)
+ if interval < min {
+ return min
+ }
+
+ return interval
+}
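+
+// For example, RateScaledInterval(1.0, time.Second, 60) returns one minute:
+// with 60 nodes each acting once per minute, the cluster as a whole performs
+// roughly one action per second.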
diff --git a/vendor/github.com/hashicorp/consul/lib/math.go b/vendor/github.com/hashicorp/consul/lib/math.go
new file mode 100644
index 000000000..1d0b6dc0f
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/lib/math.go
@@ -0,0 +1,22 @@
+package lib
+
+func AbsInt(a int) int {
+ if a > 0 {
+ return a
+ }
+ return a * -1
+}
+
+func MaxInt(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func MinInt(a, b int) int {
+ if a > b {
+ return b
+ }
+ return a
+}
diff --git a/vendor/github.com/hashicorp/consul/lib/rand.go b/vendor/github.com/hashicorp/consul/lib/rand.go
new file mode 100644
index 000000000..22aa4f354
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/lib/rand.go
@@ -0,0 +1,34 @@
+package lib
+
+import (
+ crand "crypto/rand"
+ "math"
+ "math/big"
+ "math/rand"
+ "sync"
+ "time"
+)
+
+var (
+ once sync.Once
+
+ // SeededSecurely is set to true if a cryptographically secure seed
+ // was used to initialize rand. When false, the start time is used
+ // as a seed.
+ SeededSecurely bool
+)
+
+// SeedMathRand provides weak but guaranteed seeding, which is better than
+// running with Go's default seed of 1. SeedMathRand is expected to be
+// called once, typically from init(), and never a second time.
+func SeedMathRand() {
+ once.Do(func() {
+ n, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
+ if err != nil {
+ rand.Seed(time.Now().UTC().UnixNano())
+ return
+ }
+ rand.Seed(n.Int64())
+ SeededSecurely = true
+ })
+}
diff --git a/vendor/github.com/hashicorp/consul/lib/string.go b/vendor/github.com/hashicorp/consul/lib/string.go
new file mode 100644
index 000000000..0780abb63
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/lib/string.go
@@ -0,0 +1,11 @@
+package lib
+
+// StrContains checks if a list contains a string
+func StrContains(l []string, s string) bool {
+ for _, v := range l {
+ if v == s {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/hashicorp/go-msgpack/LICENSE b/vendor/github.com/hashicorp/go-msgpack/LICENSE
new file mode 100644
index 000000000..ccae99f6a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2012, 2013 Ugorji Nwoke.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+* Neither the name of the author nor the names of its contributors may be used
+ to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go b/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go
new file mode 100644
index 000000000..c14d810a7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go
@@ -0,0 +1,143 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+/*
+High Performance, Feature-Rich Idiomatic Go encoding library for msgpack and binc .
+
+Supported Serialization formats are:
+
+ - msgpack: [https://github.com/msgpack/msgpack]
+ - binc: [http://github.com/ugorji/binc]
+
+To install:
+
+ go get github.com/ugorji/go/codec
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (ie json, xml, gob, etc).
+
+Rich Feature Set includes:
+
+ - Simple but extremely powerful and feature-rich API
+ - Very High Performance.
+ Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X.
+ This was achieved by taking extreme care on:
+ - managing allocation
+ - function frame size (important due to Go's use of split stacks),
+ - reflection use (and by-passing reflection for common types)
+ - recursion implications
+ - zero-copy mode (encoding/decoding to byte slice without using temp buffers)
+ - Correct.
+ Care was taken to precisely handle corner cases like:
+ overflows, nil maps and slices, nil value in stream, etc.
+ - Efficient zero-copying into temporary byte buffers
+ when encoding into or decoding from a byte slice.
+ - Standard field renaming via tags
+ - Encoding from any value
+ (struct, slice, map, primitives, pointers, interface{}, etc)
+ - Decoding into pointer to any non-nil typed value
+ (struct, slice, map, int, float32, bool, string, reflect.Value, etc)
+ - Supports extension functions to handle the encode/decode of custom types
+ - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler
+ - Schema-less decoding
+ (decode into a pointer to a nil interface{} as opposed to a typed non-nil value).
+ Includes Options to configure what specific map or slice type to use
+ when decoding an encoded list or map into a nil interface{}
+ - Provides a RPC Server and Client Codec for net/rpc communication protocol.
+ - Msgpack Specific:
+ - Provides extension functions to handle spec-defined extensions (binary, timestamp)
+ - Options to resolve ambiguities in handling raw bytes (as string or []byte)
+ during schema-less decoding (decoding into a nil interface{})
+ - RPC Server/Client Codec for msgpack-rpc protocol defined at:
+ https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+ - Fast Paths for some container types:
+ For some container types, we circumvent reflection and its associated overhead
+ and allocation costs, and encode/decode directly. These types are:
+ []interface{}
+ []int
+ []string
+ map[interface{}]interface{}
+ map[int]interface{}
+ map[string]interface{}
+
+Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+ type BitSet []int
+ type BitSet64 uint64
+ type UUID string
+ type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+ type GifImage struct { ... }
+
+As an illustration, MyStructWithUnexportedFields would normally be
+encoded as an empty map because it has no exported fields, while UUID
+would be encoded as a string. However, with extension support, you can
+encode any of these however you like.
+
+RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package.
+
+Usage
+
+Typical usage model:
+
+ // create and configure Handle
+ var (
+ bh codec.BincHandle
+ mh codec.MsgpackHandle
+ )
+
+ mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+
+ // configure extensions
+ // e.g. for msgpack, define functions and enable Time support for tag 1
+ // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn)
+
+ // create and use decoder/encoder
+ var (
+ r io.Reader
+ w io.Writer
+ b []byte
+ h = &bh // or mh to use msgpack
+ )
+
+ dec = codec.NewDecoder(r, h)
+ dec = codec.NewDecoderBytes(b, h)
+ err = dec.Decode(&v)
+
+ enc = codec.NewEncoder(w, h)
+ enc = codec.NewEncoderBytes(&b, h)
+ err = enc.Encode(v)
+
+ //RPC Server
+ go func() {
+ for {
+ conn, err := listener.Accept()
+ rpcCodec := codec.GoRpc.ServerCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
+ rpc.ServeCodec(rpcCodec)
+ }
+ }()
+
+ //RPC Communication (client side)
+ conn, err = net.Dial("tcp", "localhost:5555")
+ rpcCodec := codec.GoRpc.ClientCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
+ client := rpc.NewClientWithCodec(rpcCodec)
+
+Representative Benchmark Results
+
+Run the benchmark suite using:
+ go test -bi -bench=. -benchmem
+
+To run full benchmark suite (including against vmsgpack and bson),
+see notes in ext_dep_test.go
+
+*/
+package codec
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/README.md b/vendor/github.com/hashicorp/go-msgpack/codec/README.md
new file mode 100644
index 000000000..6c95d1bfd
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/README.md
@@ -0,0 +1,174 @@
+# Codec
+
+High Performance and Feature-Rich Idiomatic Go Library providing
+encode/decode support for different serialization formats.
+
+Supported Serialization formats are:
+
+ - msgpack: [https://github.com/msgpack/msgpack]
+ - binc: [http://github.com/ugorji/binc]
+
+To install:
+
+ go get github.com/ugorji/go/codec
+
+Online documentation: [http://godoc.org/github.com/ugorji/go/codec]
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (ie json, xml, gob, etc).
+
+Rich Feature Set includes:
+
+ - Simple but extremely powerful and feature-rich API
+ - Very High Performance.
+ Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X.
+ This was achieved by taking extreme care on:
+ - managing allocation
+ - function frame size (important due to Go's use of split stacks),
+ - reflection use (and by-passing reflection for common types)
+ - recursion implications
+ - zero-copy mode (encoding/decoding to byte slice without using temp buffers)
+ - Correct.
+ Care was taken to precisely handle corner cases like:
+ overflows, nil maps and slices, nil value in stream, etc.
+ - Efficient zero-copying into temporary byte buffers
+ when encoding into or decoding from a byte slice.
+ - Standard field renaming via tags
+ - Encoding from any value
+ (struct, slice, map, primitives, pointers, interface{}, etc)
+ - Decoding into pointer to any non-nil typed value
+ (struct, slice, map, int, float32, bool, string, reflect.Value, etc)
+ - Supports extension functions to handle the encode/decode of custom types
+ - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler
+ - Schema-less decoding
+ (decode into a pointer to a nil interface{} as opposed to a typed non-nil value).
+ Includes Options to configure what specific map or slice type to use
+ when decoding an encoded list or map into a nil interface{}
+ - Provides a RPC Server and Client Codec for net/rpc communication protocol.
+ - Msgpack Specific:
+ - Provides extension functions to handle spec-defined extensions (binary, timestamp)
+ - Options to resolve ambiguities in handling raw bytes (as string or []byte)
+ during schema-less decoding (decoding into a nil interface{})
+ - RPC Server/Client Codec for msgpack-rpc protocol defined at:
+ https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+ - Fast Paths for some container types:
+ For some container types, we circumvent reflection and its associated overhead
+ and allocation costs, and encode/decode directly. These types are:
+ []interface{}
+ []int
+ []string
+ map[interface{}]interface{}
+ map[int]interface{}
+ map[string]interface{}
+
+## Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+ type BitSet []int
+ type BitSet64 uint64
+ type UUID string
+ type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+ type GifImage struct { ... }
+
+As an illustration, MyStructWithUnexportedFields would normally be
+encoded as an empty map because it has no exported fields, while UUID
+would be encoded as a string. However, with extension support, you can
+encode any of these however you like.
+
+## RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package.
+
+## Usage
+
+Typical usage model:
+
+ // create and configure Handle
+ var (
+ bh codec.BincHandle
+ mh codec.MsgpackHandle
+ )
+
+ mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+
+ // configure extensions
+ // e.g. for msgpack, define functions and enable Time support for tag 1
+ // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn)
+
+ // create and use decoder/encoder
+ var (
+ r io.Reader
+ w io.Writer
+ b []byte
+ h = &bh // or mh to use msgpack
+ )
+
+ dec = codec.NewDecoder(r, h)
+ dec = codec.NewDecoderBytes(b, h)
+ err = dec.Decode(&v)
+
+ enc = codec.NewEncoder(w, h)
+ enc = codec.NewEncoderBytes(&b, h)
+ err = enc.Encode(v)
+
+ //RPC Server
+ go func() {
+ for {
+ conn, err := listener.Accept()
+ rpcCodec := codec.GoRpc.ServerCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
+ rpc.ServeCodec(rpcCodec)
+ }
+ }()
+
+ //RPC Communication (client side)
+ conn, err = net.Dial("tcp", "localhost:5555")
+ rpcCodec := codec.GoRpc.ClientCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
+ client := rpc.NewClientWithCodec(rpcCodec)
+
+## Representative Benchmark Results
+
+A sample run of benchmark using "go test -bi -bench=. -benchmem":
+
+ /proc/cpuinfo: Intel(R) Core(TM) i7-2630QM CPU @ 2.00GHz (HT)
+
+ ..............................................
+ BENCHMARK INIT: 2013-10-16 11:02:50.345970786 -0400 EDT
+ To run full benchmark comparing encodings (MsgPack, Binc, JSON, GOB, etc), use: "go test -bench=."
+ Benchmark:
+ Struct recursive Depth: 1
+ ApproxDeepSize Of benchmark Struct: 4694 bytes
+ Benchmark One-Pass Run:
+ v-msgpack: len: 1600 bytes
+ bson: len: 3025 bytes
+ msgpack: len: 1560 bytes
+ binc: len: 1187 bytes
+ gob: len: 1972 bytes
+ json: len: 2538 bytes
+ ..............................................
+ PASS
+ Benchmark__Msgpack____Encode 50000 54359 ns/op 14953 B/op 83 allocs/op
+ Benchmark__Msgpack____Decode 10000 106531 ns/op 14990 B/op 410 allocs/op
+ Benchmark__Binc_NoSym_Encode 50000 53956 ns/op 14966 B/op 83 allocs/op
+ Benchmark__Binc_NoSym_Decode 10000 103751 ns/op 14529 B/op 386 allocs/op
+ Benchmark__Binc_Sym___Encode 50000 65961 ns/op 17130 B/op 88 allocs/op
+ Benchmark__Binc_Sym___Decode 10000 106310 ns/op 15857 B/op 287 allocs/op
+ Benchmark__Gob________Encode 10000 135944 ns/op 21189 B/op 237 allocs/op
+ Benchmark__Gob________Decode 5000 405390 ns/op 83460 B/op 1841 allocs/op
+ Benchmark__Json_______Encode 20000 79412 ns/op 13874 B/op 102 allocs/op
+ Benchmark__Json_______Decode 10000 247979 ns/op 14202 B/op 493 allocs/op
+ Benchmark__Bson_______Encode 10000 121762 ns/op 27814 B/op 514 allocs/op
+ Benchmark__Bson_______Decode 10000 162126 ns/op 16514 B/op 789 allocs/op
+ Benchmark__VMsgpack___Encode 50000 69155 ns/op 12370 B/op 344 allocs/op
+ Benchmark__VMsgpack___Decode 10000 151609 ns/op 20307 B/op 571 allocs/op
+ ok ugorji.net/codec 30.827s
+
+To run full benchmark suite (including against vmsgpack and bson),
+see notes in ext\_dep\_test.go
+
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/binc.go b/vendor/github.com/hashicorp/go-msgpack/codec/binc.go
new file mode 100644
index 000000000..2bb5e8fee
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/binc.go
@@ -0,0 +1,786 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ // "reflect"
+ // "sync/atomic"
+ "time"
+ //"fmt"
+)
+
+const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning.
+
+//var _ = fmt.Printf
+
+// vd as low 4 bits (there are 16 slots)
+const (
+ bincVdSpecial byte = iota
+ bincVdPosInt
+ bincVdNegInt
+ bincVdFloat
+
+ bincVdString
+ bincVdByteArray
+ bincVdArray
+ bincVdMap
+
+ bincVdTimestamp
+ bincVdSmallInt
+ bincVdUnicodeOther
+ bincVdSymbol
+
+ bincVdDecimal
+ _ // open slot
+ _ // open slot
+ bincVdCustomExt = 0x0f
+)
+
+const (
+ bincSpNil byte = iota
+ bincSpFalse
+ bincSpTrue
+ bincSpNan
+ bincSpPosInf
+ bincSpNegInf
+ bincSpZeroFloat
+ bincSpZero
+ bincSpNegOne
+)
+
+const (
+ bincFlBin16 byte = iota
+ bincFlBin32
+ _ // bincFlBin32e
+ bincFlBin64
+ _ // bincFlBin64e
+ // others not currently supported
+)
+
+type bincEncDriver struct {
+ w encWriter
+ m map[string]uint16 // symbols
+ s uint32 // symbols sequencer
+ b [8]byte
+}
+
+func (e *bincEncDriver) isBuiltinType(rt uintptr) bool {
+ return rt == timeTypId
+}
+
+func (e *bincEncDriver) encodeBuiltin(rt uintptr, v interface{}) {
+ switch rt {
+ case timeTypId:
+ bs := encodeTime(v.(time.Time))
+ e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
+ e.w.writeb(bs)
+ }
+}
+
+func (e *bincEncDriver) encodeNil() {
+ e.w.writen1(bincVdSpecial<<4 | bincSpNil)
+}
+
+func (e *bincEncDriver) encodeBool(b bool) {
+ if b {
+ e.w.writen1(bincVdSpecial<<4 | bincSpTrue)
+ } else {
+ e.w.writen1(bincVdSpecial<<4 | bincSpFalse)
+ }
+}
+
+func (e *bincEncDriver) encodeFloat32(f float32) {
+ if f == 0 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
+ return
+ }
+ e.w.writen1(bincVdFloat<<4 | bincFlBin32)
+ e.w.writeUint32(math.Float32bits(f))
+}
+
+func (e *bincEncDriver) encodeFloat64(f float64) {
+ if f == 0 {
+ e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
+ return
+ }
+ bigen.PutUint64(e.b[:], math.Float64bits(f))
+ if bincDoPrune {
+ i := 7
+ for ; i >= 0 && (e.b[i] == 0); i-- {
+ }
+ i++
+ if i <= 6 {
+ e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64)
+ e.w.writen1(byte(i))
+ e.w.writeb(e.b[:i])
+ return
+ }
+ }
+ e.w.writen1(bincVdFloat<<4 | bincFlBin64)
+ e.w.writeb(e.b[:])
+}
+
+func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) {
+ if lim == 4 {
+ bigen.PutUint32(e.b[:lim], uint32(v))
+ } else {
+ bigen.PutUint64(e.b[:lim], v)
+ }
+ if bincDoPrune {
+ i := pruneSignExt(e.b[:lim], pos)
+ e.w.writen1(bd | lim - 1 - byte(i))
+ e.w.writeb(e.b[i:lim])
+ } else {
+ e.w.writen1(bd | lim - 1)
+ e.w.writeb(e.b[:lim])
+ }
+}
+
+func (e *bincEncDriver) encodeInt(v int64) {
+ const nbd byte = bincVdNegInt << 4
+ switch {
+ case v >= 0:
+ e.encUint(bincVdPosInt<<4, true, uint64(v))
+ case v == -1:
+ e.w.writen1(bincVdSpecial<<4 | bincSpNegOne)
+ default:
+ e.encUint(bincVdNegInt<<4, false, uint64(-v))
+ }
+}
+
+func (e *bincEncDriver) encodeUint(v uint64) {
+ e.encUint(bincVdPosInt<<4, true, v)
+}
+
+func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) {
+ switch {
+ case v == 0:
+ e.w.writen1(bincVdSpecial<<4 | bincSpZero)
+ case pos && v >= 1 && v <= 16:
+ e.w.writen1(bincVdSmallInt<<4 | byte(v-1))
+ case v <= math.MaxUint8:
+ e.w.writen2(bd|0x0, byte(v))
+ case v <= math.MaxUint16:
+ e.w.writen1(bd | 0x01)
+ e.w.writeUint16(uint16(v))
+ case v <= math.MaxUint32:
+ e.encIntegerPrune(bd, pos, v, 4)
+ default:
+ e.encIntegerPrune(bd, pos, v, 8)
+ }
+}
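+
+// For example, the unsigned value 5 is encoded as the single byte
+// bincVdSmallInt<<4 | 4 (0x94), while 300 is written as the descriptor
+// byte bd|0x01 followed by the value as a two-byte uint16.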
+
+func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) {
+ e.encLen(bincVdCustomExt<<4, uint64(length))
+ e.w.writen1(xtag)
+}
+
+func (e *bincEncDriver) encodeArrayPreamble(length int) {
+ e.encLen(bincVdArray<<4, uint64(length))
+}
+
+func (e *bincEncDriver) encodeMapPreamble(length int) {
+ e.encLen(bincVdMap<<4, uint64(length))
+}
+
+func (e *bincEncDriver) encodeString(c charEncoding, v string) {
+ l := uint64(len(v))
+ e.encBytesLen(c, l)
+ if l > 0 {
+ e.w.writestr(v)
+ }
+}
+
+func (e *bincEncDriver) encodeSymbol(v string) {
+ // if WriteSymbolsNoRefs {
+ // e.encodeString(c_UTF8, v)
+ // return
+ // }
+
+ //symbols only offer benefit when string length > 1.
+ //This is because strings with length 1 take only 2 bytes to store
+ //(bd with embedded length, and single byte for string val).
+
+ l := len(v)
+ switch l {
+ case 0:
+ e.encBytesLen(c_UTF8, 0)
+ return
+ case 1:
+ e.encBytesLen(c_UTF8, 1)
+ e.w.writen1(v[0])
+ return
+ }
+ if e.m == nil {
+ e.m = make(map[string]uint16, 16)
+ }
+ ui, ok := e.m[v]
+ if ok {
+ if ui <= math.MaxUint8 {
+ e.w.writen2(bincVdSymbol<<4, byte(ui))
+ } else {
+ e.w.writen1(bincVdSymbol<<4 | 0x8)
+ e.w.writeUint16(ui)
+ }
+ } else {
+ e.s++
+ ui = uint16(e.s)
+ //ui = uint16(atomic.AddUint32(&e.s, 1))
+ e.m[v] = ui
+ var lenprec uint8
+ switch {
+ case l <= math.MaxUint8:
+ // lenprec = 0
+ case l <= math.MaxUint16:
+ lenprec = 1
+ case int64(l) <= math.MaxUint32:
+ lenprec = 2
+ default:
+ lenprec = 3
+ }
+ if ui <= math.MaxUint8 {
+ e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui))
+ } else {
+ e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec)
+ e.w.writeUint16(ui)
+ }
+ switch lenprec {
+ case 0:
+ e.w.writen1(byte(l))
+ case 1:
+ e.w.writeUint16(uint16(l))
+ case 2:
+ e.w.writeUint32(uint32(l))
+ default:
+ e.w.writeUint64(uint64(l))
+ }
+ e.w.writestr(v)
+ }
+}
+
+func (e *bincEncDriver) encodeStringBytes(c charEncoding, v []byte) {
+ l := uint64(len(v))
+ e.encBytesLen(c, l)
+ if l > 0 {
+ e.w.writeb(v)
+ }
+}
+
+func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) {
+ //TODO: support bincUnicodeOther (for now, just use string or bytearray)
+ if c == c_RAW {
+ e.encLen(bincVdByteArray<<4, length)
+ } else {
+ e.encLen(bincVdString<<4, length)
+ }
+}
+
+func (e *bincEncDriver) encLen(bd byte, l uint64) {
+ if l < 12 {
+ e.w.writen1(bd | uint8(l+4))
+ } else {
+ e.encLenNumber(bd, l)
+ }
+}
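+
+// For example, an array of length 3 is announced with the single byte
+// bincVdArray<<4 | (3+4) = 0x67; lengths of 12 or more fall through to
+// encLenNumber and are written out explicitly.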
+
+func (e *bincEncDriver) encLenNumber(bd byte, v uint64) {
+ switch {
+ case v <= math.MaxUint8:
+ e.w.writen2(bd, byte(v))
+ case v <= math.MaxUint16:
+ e.w.writen1(bd | 0x01)
+ e.w.writeUint16(uint16(v))
+ case v <= math.MaxUint32:
+ e.w.writen1(bd | 0x02)
+ e.w.writeUint32(uint32(v))
+ default:
+ e.w.writen1(bd | 0x03)
+ e.w.writeUint64(uint64(v))
+ }
+}
+
+//------------------------------------
+
+type bincDecDriver struct {
+ r decReader
+ bdRead bool
+ bdType valueType
+ bd byte
+ vd byte
+ vs byte
+ b [8]byte
+ m map[uint32]string // symbols (use uint32 as key, as map optimizes for it)
+}
+
+func (d *bincDecDriver) initReadNext() {
+ if d.bdRead {
+ return
+ }
+ d.bd = d.r.readn1()
+ d.vd = d.bd >> 4
+ d.vs = d.bd & 0x0f
+ d.bdRead = true
+ d.bdType = valueTypeUnset
+}
+
+func (d *bincDecDriver) currentEncodedType() valueType {
+ if d.bdType == valueTypeUnset {
+ switch d.vd {
+ case bincVdSpecial:
+ switch d.vs {
+ case bincSpNil:
+ d.bdType = valueTypeNil
+ case bincSpFalse, bincSpTrue:
+ d.bdType = valueTypeBool
+ case bincSpNan, bincSpNegInf, bincSpPosInf, bincSpZeroFloat:
+ d.bdType = valueTypeFloat
+ case bincSpZero:
+ d.bdType = valueTypeUint
+ case bincSpNegOne:
+ d.bdType = valueTypeInt
+ default:
+ decErr("currentEncodedType: Unrecognized special value 0x%x", d.vs)
+ }
+ case bincVdSmallInt:
+ d.bdType = valueTypeUint
+ case bincVdPosInt:
+ d.bdType = valueTypeUint
+ case bincVdNegInt:
+ d.bdType = valueTypeInt
+ case bincVdFloat:
+ d.bdType = valueTypeFloat
+ case bincVdString:
+ d.bdType = valueTypeString
+ case bincVdSymbol:
+ d.bdType = valueTypeSymbol
+ case bincVdByteArray:
+ d.bdType = valueTypeBytes
+ case bincVdTimestamp:
+ d.bdType = valueTypeTimestamp
+ case bincVdCustomExt:
+ d.bdType = valueTypeExt
+ case bincVdArray:
+ d.bdType = valueTypeArray
+ case bincVdMap:
+ d.bdType = valueTypeMap
+ default:
+ decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.vd)
+ }
+ }
+ return d.bdType
+}
+
+func (d *bincDecDriver) tryDecodeAsNil() bool {
+ if d.bd == bincVdSpecial<<4|bincSpNil {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *bincDecDriver) isBuiltinType(rt uintptr) bool {
+ return rt == timeTypId
+}
+
+func (d *bincDecDriver) decodeBuiltin(rt uintptr, v interface{}) {
+ switch rt {
+ case timeTypId:
+ if d.vd != bincVdTimestamp {
+ decErr("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd)
+ }
+ tt, err := decodeTime(d.r.readn(int(d.vs)))
+ if err != nil {
+ panic(err)
+ }
+ var vt *time.Time = v.(*time.Time)
+ *vt = tt
+ d.bdRead = false
+ }
+}
+
+func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
+ if vs&0x8 == 0 {
+ d.r.readb(d.b[0:defaultLen])
+ } else {
+ l := d.r.readn1()
+ if l > 8 {
+ decErr("At most 8 bytes used to represent float. Received: %v bytes", l)
+ }
+ for i := l; i < 8; i++ {
+ d.b[i] = 0
+ }
+ d.r.readb(d.b[0:l])
+ }
+}
+
+func (d *bincDecDriver) decFloat() (f float64) {
+ //if true { f = math.Float64frombits(d.r.readUint64()); break; }
+ switch vs := d.vs; vs & 0x7 {
+ case bincFlBin32:
+ d.decFloatPre(vs, 4)
+ f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4])))
+ case bincFlBin64:
+ d.decFloatPre(vs, 8)
+ f = math.Float64frombits(bigen.Uint64(d.b[0:8]))
+ default:
+ decErr("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs)
+ }
+ return
+}
+
+func (d *bincDecDriver) decUint() (v uint64) {
+ // need to inline the code (interface conversion and type assertion expensive)
+ switch d.vs {
+ case 0:
+ v = uint64(d.r.readn1())
+ case 1:
+ d.r.readb(d.b[6:])
+ v = uint64(bigen.Uint16(d.b[6:]))
+ case 2:
+ d.b[4] = 0
+ d.r.readb(d.b[5:])
+ v = uint64(bigen.Uint32(d.b[4:]))
+ case 3:
+ d.r.readb(d.b[4:])
+ v = uint64(bigen.Uint32(d.b[4:]))
+ case 4, 5, 6:
+ lim := int(7 - d.vs)
+ d.r.readb(d.b[lim:])
+ for i := 0; i < lim; i++ {
+ d.b[i] = 0
+ }
+ v = uint64(bigen.Uint64(d.b[:]))
+ case 7:
+ d.r.readb(d.b[:])
+ v = uint64(bigen.Uint64(d.b[:]))
+ default:
+ decErr("unsigned integers with greater than 64 bits of precision not supported")
+ }
+ return
+}
+
+func (d *bincDecDriver) decIntAny() (ui uint64, i int64, neg bool) {
+ switch d.vd {
+ case bincVdPosInt:
+ ui = d.decUint()
+ i = int64(ui)
+ case bincVdNegInt:
+ ui = d.decUint()
+ i = -(int64(ui))
+ neg = true
+ case bincVdSmallInt:
+ i = int64(d.vs) + 1
+ ui = uint64(d.vs) + 1
+ case bincVdSpecial:
+ switch d.vs {
+ case bincSpZero:
+ //i = 0
+ case bincSpNegOne:
+ neg = true
+ ui = 1
+ i = -1
+ default:
+ decErr("numeric decode fails for special value: d.vs: 0x%x", d.vs)
+ }
+ default:
+ decErr("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd)
+ }
+ return
+}
+
+func (d *bincDecDriver) decodeInt(bitsize uint8) (i int64) {
+ _, i, _ = d.decIntAny()
+ checkOverflow(0, i, bitsize)
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) decodeUint(bitsize uint8) (ui uint64) {
+ ui, i, neg := d.decIntAny()
+ if neg {
+ decErr("Assigning negative signed value: %v, to unsigned type", i)
+ }
+ checkOverflow(ui, 0, bitsize)
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) decodeFloat(chkOverflow32 bool) (f float64) {
+ switch d.vd {
+ case bincVdSpecial:
+ d.bdRead = false
+ switch d.vs {
+ case bincSpNan:
+ return math.NaN()
+ case bincSpPosInf:
+ return math.Inf(1)
+ case bincSpZeroFloat, bincSpZero:
+ return
+ case bincSpNegInf:
+ return math.Inf(-1)
+ default:
+ decErr("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs)
+ }
+ case bincVdFloat:
+ f = d.decFloat()
+ default:
+ _, i, _ := d.decIntAny()
+ f = float64(i)
+ }
+ checkOverflowFloat32(f, chkOverflow32)
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *bincDecDriver) decodeBool() (b bool) {
+ switch d.bd {
+ case (bincVdSpecial | bincSpFalse):
+ // b = false
+ case (bincVdSpecial | bincSpTrue):
+ b = true
+ default:
+ decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) readMapLen() (length int) {
+ if d.vd != bincVdMap {
+ decErr("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd)
+ }
+ length = d.decLen()
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) readArrayLen() (length int) {
+ if d.vd != bincVdArray {
+ decErr("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd)
+ }
+ length = d.decLen()
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) decLen() int {
+ if d.vs <= 3 {
+ return int(d.decUint())
+ }
+ return int(d.vs - 4)
+}
+
+func (d *bincDecDriver) decodeString() (s string) {
+ switch d.vd {
+ case bincVdString, bincVdByteArray:
+ if length := d.decLen(); length > 0 {
+ s = string(d.r.readn(length))
+ }
+ case bincVdSymbol:
+ //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision,
+ //extract symbol
+ //if containsStringVal, read it and put in map
+ //else look in map for string value
+ var symbol uint32
+ vs := d.vs
+ //fmt.Printf(">>>> d.vs: 0b%b, & 0x8: %v, & 0x4: %v\n", d.vs, vs & 0x8, vs & 0x4)
+ if vs&0x8 == 0 {
+ symbol = uint32(d.r.readn1())
+ } else {
+ symbol = uint32(d.r.readUint16())
+ }
+ if d.m == nil {
+ d.m = make(map[uint32]string, 16)
+ }
+
+ if vs&0x4 == 0 {
+ s = d.m[symbol]
+ } else {
+ var slen int
+ switch vs & 0x3 {
+ case 0:
+ slen = int(d.r.readn1())
+ case 1:
+ slen = int(d.r.readUint16())
+ case 2:
+ slen = int(d.r.readUint32())
+ case 3:
+ slen = int(d.r.readUint64())
+ }
+ s = string(d.r.readn(slen))
+ d.m[symbol] = s
+ }
+ default:
+ decErr("Invalid d.vd for string. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x",
+ bincVdString, bincVdByteArray, bincVdSymbol, d.vd)
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) {
+ var clen int
+ switch d.vd {
+ case bincVdString, bincVdByteArray:
+ clen = d.decLen()
+ default:
+ decErr("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x",
+ bincVdString, bincVdByteArray, d.vd)
+ }
+ if clen > 0 {
+ // if no contents in stream, don't update the passed byteslice
+ if len(bs) != clen {
+ if len(bs) > clen {
+ bs = bs[:clen]
+ } else {
+ bs = make([]byte, clen)
+ }
+ bsOut = bs
+ changed = true
+ }
+ d.r.readb(bs)
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+ switch d.vd {
+ case bincVdCustomExt:
+ l := d.decLen()
+ xtag = d.r.readn1()
+ if verifyTag && xtag != tag {
+ decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+ }
+ xbs = d.r.readn(l)
+ case bincVdByteArray:
+ xbs, _ = d.decodeBytes(nil)
+ default:
+ decErr("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd)
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
+ d.initReadNext()
+
+ switch d.vd {
+ case bincVdSpecial:
+ switch d.vs {
+ case bincSpNil:
+ vt = valueTypeNil
+ case bincSpFalse:
+ vt = valueTypeBool
+ v = false
+ case bincSpTrue:
+ vt = valueTypeBool
+ v = true
+ case bincSpNan:
+ vt = valueTypeFloat
+ v = math.NaN()
+ case bincSpPosInf:
+ vt = valueTypeFloat
+ v = math.Inf(1)
+ case bincSpNegInf:
+ vt = valueTypeFloat
+ v = math.Inf(-1)
+ case bincSpZeroFloat:
+ vt = valueTypeFloat
+ v = float64(0)
+ case bincSpZero:
+ vt = valueTypeUint
+ v = int64(0) // int8(0)
+ case bincSpNegOne:
+ vt = valueTypeInt
+ v = int64(-1) // int8(-1)
+ default:
+ decErr("decodeNaked: Unrecognized special value 0x%x", d.vs)
+ }
+ case bincVdSmallInt:
+ vt = valueTypeUint
+ v = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1
+ case bincVdPosInt:
+ vt = valueTypeUint
+ v = d.decUint()
+ case bincVdNegInt:
+ vt = valueTypeInt
+ v = -(int64(d.decUint()))
+ case bincVdFloat:
+ vt = valueTypeFloat
+ v = d.decFloat()
+ case bincVdSymbol:
+ vt = valueTypeSymbol
+ v = d.decodeString()
+ case bincVdString:
+ vt = valueTypeString
+ v = d.decodeString()
+ case bincVdByteArray:
+ vt = valueTypeBytes
+ v, _ = d.decodeBytes(nil)
+ case bincVdTimestamp:
+ vt = valueTypeTimestamp
+ tt, err := decodeTime(d.r.readn(int(d.vs)))
+ if err != nil {
+ panic(err)
+ }
+ v = tt
+ case bincVdCustomExt:
+ vt = valueTypeExt
+ l := d.decLen()
+ var re RawExt
+ re.Tag = d.r.readn1()
+ re.Data = d.r.readn(l)
+ v = &re
+ vt = valueTypeExt
+ case bincVdArray:
+ vt = valueTypeArray
+ decodeFurther = true
+ case bincVdMap:
+ vt = valueTypeMap
+ decodeFurther = true
+ default:
+ decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.vd)
+ }
+
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ return
+}
+
+//------------------------------------
+
+//BincHandle is a Handle for the Binc Schema-Free Encoding Format
+//defined at https://github.com/ugorji/binc .
+//
+//BincHandle currently supports all Binc features with the following EXCEPTIONS:
+// - only integers up to 64 bits of precision are supported.
+// big integers are unsupported.
+// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types).
+// extended precision and decimal IEEE 754 floats are unsupported.
+// - Only UTF-8 strings supported.
+// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
+//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon.
+type BincHandle struct {
+ BasicHandle
+}
+
+func (h *BincHandle) newEncDriver(w encWriter) encDriver {
+ return &bincEncDriver{w: w}
+}
+
+func (h *BincHandle) newDecDriver(r decReader) decDriver {
+ return &bincDecDriver{r: r}
+}
+
+func (_ *BincHandle) writeExt() bool {
+ return true
+}
+
+func (h *BincHandle) getBasicHandle() *BasicHandle {
+ return &h.BasicHandle
+}
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/decode.go b/vendor/github.com/hashicorp/go-msgpack/codec/decode.go
new file mode 100644
index 000000000..87bef2b93
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/decode.go
@@ -0,0 +1,1048 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+import (
+ "io"
+ "reflect"
+ // "runtime/debug"
+)
+
+// Some tagging information for error messages.
+const (
+ msgTagDec = "codec.decoder"
+ msgBadDesc = "Unrecognized descriptor byte"
+ msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v"
+)
+
+// decReader abstracts the reading source, allowing implementations that can
+// read from an io.Reader or directly off a byte slice with zero-copying.
+type decReader interface {
+ readn(n int) []byte
+ readb([]byte)
+ readn1() uint8
+ readUint16() uint16
+ readUint32() uint32
+ readUint64() uint64
+}
+
+type decDriver interface {
+ initReadNext()
+ tryDecodeAsNil() bool
+ currentEncodedType() valueType
+ isBuiltinType(rt uintptr) bool
+ decodeBuiltin(rt uintptr, v interface{})
+ //decodeNaked: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
+ decodeNaked() (v interface{}, vt valueType, decodeFurther bool)
+ decodeInt(bitsize uint8) (i int64)
+ decodeUint(bitsize uint8) (ui uint64)
+ decodeFloat(chkOverflow32 bool) (f float64)
+ decodeBool() (b bool)
+ // decodeString can also decode symbols
+ decodeString() (s string)
+ decodeBytes(bs []byte) (bsOut []byte, changed bool)
+ decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)
+ readMapLen() int
+ readArrayLen() int
+}
+
+type DecodeOptions struct {
+ // An instance of MapType is used during schema-less decoding of a map in the stream.
+ // If nil, we use map[interface{}]interface{}
+ MapType reflect.Type
+ // An instance of SliceType is used during schema-less decoding of an array in the stream.
+ // If nil, we use []interface{}
+ SliceType reflect.Type
+ // ErrorIfNoField controls whether an error is returned when decoding a map
+ // from a codec stream into a struct, and no matching struct field is found.
+ ErrorIfNoField bool
+}
+
+// ------------------------------------
+
+// ioDecReader is a decReader that reads off an io.Reader
+type ioDecReader struct {
+ r io.Reader
+ br io.ByteReader
+ x [8]byte //temp byte array re-used internally for efficiency
+}
+
+func (z *ioDecReader) readn(n int) (bs []byte) {
+ if n <= 0 {
+ return
+ }
+ bs = make([]byte, n)
+ if _, err := io.ReadAtLeast(z.r, bs, n); err != nil {
+ panic(err)
+ }
+ return
+}
+
+func (z *ioDecReader) readb(bs []byte) {
+ if _, err := io.ReadAtLeast(z.r, bs, len(bs)); err != nil {
+ panic(err)
+ }
+}
+
+func (z *ioDecReader) readn1() uint8 {
+ if z.br != nil {
+ b, err := z.br.ReadByte()
+ if err != nil {
+ panic(err)
+ }
+ return b
+ }
+ z.readb(z.x[:1])
+ return z.x[0]
+}
+
+func (z *ioDecReader) readUint16() uint16 {
+ z.readb(z.x[:2])
+ return bigen.Uint16(z.x[:2])
+}
+
+func (z *ioDecReader) readUint32() uint32 {
+ z.readb(z.x[:4])
+ return bigen.Uint32(z.x[:4])
+}
+
+func (z *ioDecReader) readUint64() uint64 {
+ z.readb(z.x[:8])
+ return bigen.Uint64(z.x[:8])
+}
+
+// ------------------------------------
+
+// bytesDecReader is a decReader that reads off a byte slice with zero copying
+type bytesDecReader struct {
+ b []byte // data
+ c int // cursor
+ a int // available
+}
+
+func (z *bytesDecReader) consume(n int) (oldcursor int) {
+ if z.a == 0 {
+ panic(io.EOF)
+ }
+ if n > z.a {
+ decErr("Trying to read %v bytes. Only %v available", n, z.a)
+ }
+ // z.checkAvailable(n)
+ oldcursor = z.c
+ z.c = oldcursor + n
+ z.a = z.a - n
+ return
+}
+
+func (z *bytesDecReader) readn(n int) (bs []byte) {
+ if n <= 0 {
+ return
+ }
+ c0 := z.consume(n)
+ bs = z.b[c0:z.c]
+ return
+}
+
+func (z *bytesDecReader) readb(bs []byte) {
+ copy(bs, z.readn(len(bs)))
+}
+
+func (z *bytesDecReader) readn1() uint8 {
+ c0 := z.consume(1)
+ return z.b[c0]
+}
+
+// Use the binaryEncoding helper for 4 and 8 bytes, but inline the 2-byte case:
+// creating a temp slice and copying it into the helper function is expensive
+// for just 2 bytes.
+
+func (z *bytesDecReader) readUint16() uint16 {
+ c0 := z.consume(2)
+ return uint16(z.b[c0+1]) | uint16(z.b[c0])<<8
+}
+
+func (z *bytesDecReader) readUint32() uint32 {
+ c0 := z.consume(4)
+ return bigen.Uint32(z.b[c0:z.c])
+}
+
+func (z *bytesDecReader) readUint64() uint64 {
+ c0 := z.consume(8)
+ return bigen.Uint64(z.b[c0:z.c])
+}
+
+// ------------------------------------
+
+// decFnInfo has methods for registering handling decoding of a specific type
+// based on some characteristics (builtin, extension, reflect Kind, etc)
+type decFnInfo struct {
+ ti *typeInfo
+ d *Decoder
+ dd decDriver
+ xfFn func(reflect.Value, []byte) error
+ xfTag byte
+ array bool
+}
+
+func (f *decFnInfo) builtin(rv reflect.Value) {
+ f.dd.decodeBuiltin(f.ti.rtid, rv.Addr().Interface())
+}
+
+func (f *decFnInfo) rawExt(rv reflect.Value) {
+ xtag, xbs := f.dd.decodeExt(false, 0)
+ rv.Field(0).SetUint(uint64(xtag))
+ rv.Field(1).SetBytes(xbs)
+}
+
+func (f *decFnInfo) ext(rv reflect.Value) {
+ _, xbs := f.dd.decodeExt(true, f.xfTag)
+ if fnerr := f.xfFn(rv, xbs); fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+func (f *decFnInfo) binaryMarshal(rv reflect.Value) {
+ var bm binaryUnmarshaler
+ if f.ti.unmIndir == -1 {
+ bm = rv.Addr().Interface().(binaryUnmarshaler)
+ } else if f.ti.unmIndir == 0 {
+ bm = rv.Interface().(binaryUnmarshaler)
+ } else {
+ for j, k := int8(0), f.ti.unmIndir; j < k; j++ {
+ if rv.IsNil() {
+ rv.Set(reflect.New(rv.Type().Elem()))
+ }
+ rv = rv.Elem()
+ }
+ bm = rv.Interface().(binaryUnmarshaler)
+ }
+ xbs, _ := f.dd.decodeBytes(nil)
+ if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+func (f *decFnInfo) kErr(rv reflect.Value) {
+ decErr("Unhandled value for kind: %v: %s", rv.Kind(), msgBadDesc)
+}
+
+func (f *decFnInfo) kString(rv reflect.Value) {
+ rv.SetString(f.dd.decodeString())
+}
+
+func (f *decFnInfo) kBool(rv reflect.Value) {
+ rv.SetBool(f.dd.decodeBool())
+}
+
+func (f *decFnInfo) kInt(rv reflect.Value) {
+ rv.SetInt(f.dd.decodeInt(intBitsize))
+}
+
+func (f *decFnInfo) kInt64(rv reflect.Value) {
+ rv.SetInt(f.dd.decodeInt(64))
+}
+
+func (f *decFnInfo) kInt32(rv reflect.Value) {
+ rv.SetInt(f.dd.decodeInt(32))
+}
+
+func (f *decFnInfo) kInt8(rv reflect.Value) {
+ rv.SetInt(f.dd.decodeInt(8))
+}
+
+func (f *decFnInfo) kInt16(rv reflect.Value) {
+ rv.SetInt(f.dd.decodeInt(16))
+}
+
+func (f *decFnInfo) kFloat32(rv reflect.Value) {
+ rv.SetFloat(f.dd.decodeFloat(true))
+}
+
+func (f *decFnInfo) kFloat64(rv reflect.Value) {
+ rv.SetFloat(f.dd.decodeFloat(false))
+}
+
+func (f *decFnInfo) kUint8(rv reflect.Value) {
+ rv.SetUint(f.dd.decodeUint(8))
+}
+
+func (f *decFnInfo) kUint64(rv reflect.Value) {
+ rv.SetUint(f.dd.decodeUint(64))
+}
+
+func (f *decFnInfo) kUint(rv reflect.Value) {
+ rv.SetUint(f.dd.decodeUint(uintBitsize))
+}
+
+func (f *decFnInfo) kUint32(rv reflect.Value) {
+ rv.SetUint(f.dd.decodeUint(32))
+}
+
+func (f *decFnInfo) kUint16(rv reflect.Value) {
+ rv.SetUint(f.dd.decodeUint(16))
+}
+
+// func (f *decFnInfo) kPtr(rv reflect.Value) {
+// debugf(">>>>>>> ??? decode kPtr called - shouldn't get called")
+// if rv.IsNil() {
+// rv.Set(reflect.New(rv.Type().Elem()))
+// }
+// f.d.decodeValue(rv.Elem())
+// }
+
+func (f *decFnInfo) kInterface(rv reflect.Value) {
+ // debugf("\t===> kInterface")
+ if !rv.IsNil() {
+ f.d.decodeValue(rv.Elem())
+ return
+ }
+ // nil interface:
+ // use some heuristics to set the nil interface to an
+ // appropriate value based on the first byte read (byte descriptor bd)
+ v, vt, decodeFurther := f.dd.decodeNaked()
+ if vt == valueTypeNil {
+ return
+ }
+ // Cannot decode into nil interface with methods (e.g. error, io.Reader, etc)
+ // if non-nil value in stream.
+ if num := f.ti.rt.NumMethod(); num > 0 {
+ decErr("decodeValue: Cannot decode non-nil codec value into nil %v (%v methods)",
+ f.ti.rt, num)
+ }
+ var rvn reflect.Value
+ var useRvn bool
+ switch vt {
+ case valueTypeMap:
+ if f.d.h.MapType == nil {
+ var m2 map[interface{}]interface{}
+ v = &m2
+ } else {
+ rvn = reflect.New(f.d.h.MapType).Elem()
+ useRvn = true
+ }
+ case valueTypeArray:
+ if f.d.h.SliceType == nil {
+ var m2 []interface{}
+ v = &m2
+ } else {
+ rvn = reflect.New(f.d.h.SliceType).Elem()
+ useRvn = true
+ }
+ case valueTypeExt:
+ re := v.(*RawExt)
+ var bfn func(reflect.Value, []byte) error
+ rvn, bfn = f.d.h.getDecodeExtForTag(re.Tag)
+ if bfn == nil {
+ rvn = reflect.ValueOf(*re)
+ } else if fnerr := bfn(rvn, re.Data); fnerr != nil {
+ panic(fnerr)
+ }
+ rv.Set(rvn)
+ return
+ }
+ if decodeFurther {
+ if useRvn {
+ f.d.decodeValue(rvn)
+ } else if v != nil {
+ // this v is a pointer, so we need to dereference it when done
+ f.d.decode(v)
+ rvn = reflect.ValueOf(v).Elem()
+ useRvn = true
+ }
+ }
+ if useRvn {
+ rv.Set(rvn)
+ } else if v != nil {
+ rv.Set(reflect.ValueOf(v))
+ }
+}
+
+func (f *decFnInfo) kStruct(rv reflect.Value) {
+ fti := f.ti
+ if currEncodedType := f.dd.currentEncodedType(); currEncodedType == valueTypeMap {
+ containerLen := f.dd.readMapLen()
+ if containerLen == 0 {
+ return
+ }
+ tisfi := fti.sfi
+ for j := 0; j < containerLen; j++ {
+ // var rvkencname string
+ // ddecode(&rvkencname)
+ f.dd.initReadNext()
+ rvkencname := f.dd.decodeString()
+ // rvksi := ti.getForEncName(rvkencname)
+ if k := fti.indexForEncName(rvkencname); k > -1 {
+ sfik := tisfi[k]
+ if sfik.i != -1 {
+ f.d.decodeValue(rv.Field(int(sfik.i)))
+ } else {
+ f.d.decEmbeddedField(rv, sfik.is)
+ }
+ // f.d.decodeValue(ti.field(k, rv))
+ } else {
+ if f.d.h.ErrorIfNoField {
+ decErr("No matching struct field found when decoding stream map with key: %v",
+ rvkencname)
+ } else {
+ var nilintf0 interface{}
+ f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem())
+ }
+ }
+ }
+ } else if currEncodedType == valueTypeArray {
+ containerLen := f.dd.readArrayLen()
+ if containerLen == 0 {
+ return
+ }
+ for j, si := range fti.sfip {
+ if j == containerLen {
+ break
+ }
+ if si.i != -1 {
+ f.d.decodeValue(rv.Field(int(si.i)))
+ } else {
+ f.d.decEmbeddedField(rv, si.is)
+ }
+ }
+ if containerLen > len(fti.sfip) {
+ // read remaining values and throw away
+ for j := len(fti.sfip); j < containerLen; j++ {
+ var nilintf0 interface{}
+ f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem())
+ }
+ }
+ } else {
+ decErr("Only encoded map or array can be decoded into a struct. (valueType: %x)",
+ currEncodedType)
+ }
+}
+
+func (f *decFnInfo) kSlice(rv reflect.Value) {
+ // A slice can be set from a map or array in stream.
+ currEncodedType := f.dd.currentEncodedType()
+
+ switch currEncodedType {
+ case valueTypeBytes, valueTypeString:
+ if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 {
+ if bs2, changed2 := f.dd.decodeBytes(rv.Bytes()); changed2 {
+ rv.SetBytes(bs2)
+ }
+ return
+ }
+ }
+
+ if shortCircuitReflectToFastPath && rv.CanAddr() {
+ switch f.ti.rtid {
+ case intfSliceTypId:
+ f.d.decSliceIntf(rv.Addr().Interface().(*[]interface{}), currEncodedType, f.array)
+ return
+ case uint64SliceTypId:
+ f.d.decSliceUint64(rv.Addr().Interface().(*[]uint64), currEncodedType, f.array)
+ return
+ case int64SliceTypId:
+ f.d.decSliceInt64(rv.Addr().Interface().(*[]int64), currEncodedType, f.array)
+ return
+ case strSliceTypId:
+ f.d.decSliceStr(rv.Addr().Interface().(*[]string), currEncodedType, f.array)
+ return
+ }
+ }
+
+ containerLen, containerLenS := decContLens(f.dd, currEncodedType)
+
+ // An array can never return a nil slice, so no need to check f.array here.
+
+ if rv.IsNil() {
+ rv.Set(reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS))
+ }
+
+ if containerLen == 0 {
+ return
+ }
+
+ if rvcap, rvlen := rv.Len(), rv.Cap(); containerLenS > rvcap {
+ if f.array { // !rv.CanSet()
+ decErr(msgDecCannotExpandArr, rvcap, containerLenS)
+ }
+ rvn := reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS)
+ if rvlen > 0 {
+ reflect.Copy(rvn, rv)
+ }
+ rv.Set(rvn)
+ } else if containerLenS > rvlen {
+ rv.SetLen(containerLenS)
+ }
+
+ for j := 0; j < containerLenS; j++ {
+ f.d.decodeValue(rv.Index(j))
+ }
+}
+
+func (f *decFnInfo) kArray(rv reflect.Value) {
+ // f.d.decodeValue(rv.Slice(0, rv.Len()))
+ f.kSlice(rv.Slice(0, rv.Len()))
+}
+
+func (f *decFnInfo) kMap(rv reflect.Value) {
+ if shortCircuitReflectToFastPath && rv.CanAddr() {
+ switch f.ti.rtid {
+ case mapStrIntfTypId:
+ f.d.decMapStrIntf(rv.Addr().Interface().(*map[string]interface{}))
+ return
+ case mapIntfIntfTypId:
+ f.d.decMapIntfIntf(rv.Addr().Interface().(*map[interface{}]interface{}))
+ return
+ case mapInt64IntfTypId:
+ f.d.decMapInt64Intf(rv.Addr().Interface().(*map[int64]interface{}))
+ return
+ case mapUint64IntfTypId:
+ f.d.decMapUint64Intf(rv.Addr().Interface().(*map[uint64]interface{}))
+ return
+ }
+ }
+
+ containerLen := f.dd.readMapLen()
+
+ if rv.IsNil() {
+ rv.Set(reflect.MakeMap(f.ti.rt))
+ }
+
+ if containerLen == 0 {
+ return
+ }
+
+ ktype, vtype := f.ti.rt.Key(), f.ti.rt.Elem()
+ ktypeId := reflect.ValueOf(ktype).Pointer()
+ for j := 0; j < containerLen; j++ {
+ rvk := reflect.New(ktype).Elem()
+ f.d.decodeValue(rvk)
+
+ // special case if a byte array.
+ // if ktype == intfTyp {
+ if ktypeId == intfTypId {
+ rvk = rvk.Elem()
+ if rvk.Type() == uint8SliceTyp {
+ rvk = reflect.ValueOf(string(rvk.Bytes()))
+ }
+ }
+ rvv := rv.MapIndex(rvk)
+ if !rvv.IsValid() {
+ rvv = reflect.New(vtype).Elem()
+ }
+
+ f.d.decodeValue(rvv)
+ rv.SetMapIndex(rvk, rvv)
+ }
+}
+
+// ----------------------------------------
+
+type decFn struct {
+ i *decFnInfo
+ f func(*decFnInfo, reflect.Value)
+}
+
+// A Decoder reads and decodes an object from an input stream in the codec format.
+type Decoder struct {
+ r decReader
+ d decDriver
+ h *BasicHandle
+ f map[uintptr]decFn
+ x []uintptr
+ s []decFn
+}
+
+// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
+//
+// For efficiency, users are encouraged to pass in a memory buffered reader
+// (eg bufio.Reader, bytes.Buffer).
+func NewDecoder(r io.Reader, h Handle) *Decoder {
+ z := ioDecReader{
+ r: r,
+ }
+ z.br, _ = r.(io.ByteReader)
+ return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()}
+}
+
+// NewDecoderBytes returns a Decoder which efficiently decodes directly
+// from a byte slice with zero copying.
+func NewDecoderBytes(in []byte, h Handle) *Decoder {
+ z := bytesDecReader{
+ b: in,
+ a: len(in),
+ }
+ return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()}
+}
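The two constructors above cover streaming (io.Reader) and in-memory decoding. The sketch below is illustrative only; it assumes the package is imported from this vendored path and that MsgpackHandle (defined elsewhere in the library) is the Handle in use.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	var mh codec.MsgpackHandle // assumed Handle; any Handle from this package works

	// Encode something first so both decoders have input to work on.
	var bs []byte
	if err := codec.NewEncoderBytes(&bs, &mh).Encode(map[string]int{"a": 1}); err != nil {
		panic(err)
	}

	// NewDecoderBytes decodes straight from the slice without copying it.
	var m1 map[string]int
	if err := codec.NewDecoderBytes(bs, &mh).Decode(&m1); err != nil {
		panic(err)
	}

	// NewDecoder reads from any io.Reader; a buffered reader is preferred.
	var m2 map[string]int
	if err := codec.NewDecoder(bytes.NewReader(bs), &mh).Decode(&m2); err != nil {
		panic(err)
	}
	fmt.Println(m1["a"], m2["a"]) // 1 1
}
```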
+
+// Decode decodes the stream from reader and stores the result in the
+// value pointed to by v. v cannot be a nil pointer. v can also be
+// a reflect.Value of a pointer.
+//
+// Note that a pointer to a nil interface is not a nil pointer.
+// If you do not know what type of stream it is, pass in a pointer to a nil interface.
+// We will decode and store a value in that nil interface.
+//
+// Sample usages:
+// // Decoding into a non-nil typed value
+// var f float32
+// err = codec.NewDecoder(r, handle).Decode(&f)
+//
+// // Decoding into nil interface
+// var v interface{}
+// dec := codec.NewDecoder(r, handle)
+// err = dec.Decode(&v)
+//
+// When decoding into a nil interface{}, we will decode into an appropriate value based
+// on the contents of the stream:
+// - Numbers are decoded as float64, int64 or uint64.
+// - Other values are decoded appropriately depending on the type:
+// bool, string, []byte, time.Time, etc
+// - Extensions are decoded as RawExt (if no ext function registered for the tag)
+// Configurations exist on the Handle to override defaults
+// (e.g. for MapType, SliceType and how to decode raw bytes).
+//
+// When decoding into a non-nil interface{} value, the mode of decoding is based on the
+// type of the value. When a value is seen:
+// - If an extension is registered for it, call that extension function
+// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error
+// - Else decode it based on its reflect.Kind
+//
+// There are some special rules when decoding into containers (slice/array/map/struct).
+// Decode will typically use the stream contents to UPDATE the container.
+// - A map can be decoded from a stream map, by updating matching keys.
+// - A slice can be decoded from a stream array,
+// by updating the first n elements, where n is length of the stream.
+// - A slice can be decoded from a stream map, by decoding as if
+// it contains a sequence of key-value pairs.
+// - A struct can be decoded from a stream map, by updating matching fields.
+// - A struct can be decoded from a stream array,
+// by updating fields as they occur in the struct (by index).
+//
+// When decoding a stream map or array with length of 0 into a nil map or slice,
+// we reset the destination map or slice to a zero-length value.
+//
+// However, when decoding a stream nil, we reset the destination container
+// to its "zero" value (e.g. nil for slice/map, etc).
+//
+func (d *Decoder) Decode(v interface{}) (err error) {
+ defer panicToErr(&err)
+ d.decode(v)
+ return
+}
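As a concrete illustration of the nil-interface rules documented above, the following sketch (same assumptions as the earlier example: the vendored import path and MsgpackHandle) decodes the same bytes twice, first with the default MapType and then with MapType overridden on the Handle.

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	var mh codec.MsgpackHandle

	var bs []byte
	if err := codec.NewEncoderBytes(&bs, &mh).Encode(map[string]interface{}{"n": 3}); err != nil {
		panic(err)
	}

	// Decoding into a pointer to a nil interface lets the stream choose the Go type.
	var v interface{}
	if err := codec.NewDecoderBytes(bs, &mh).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Println(reflect.TypeOf(v)) // map[interface {}]interface {} by default

	// Overriding MapType on the Handle changes the container used for stream maps.
	mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
	v = nil
	if err := codec.NewDecoderBytes(bs, &mh).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Println(reflect.TypeOf(v)) // map[string]interface {}
}
```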
+
+func (d *Decoder) decode(iv interface{}) {
+ d.d.initReadNext()
+
+ switch v := iv.(type) {
+ case nil:
+ decErr("Cannot decode into nil.")
+
+ case reflect.Value:
+ d.chkPtrValue(v)
+ d.decodeValue(v.Elem())
+
+ case *string:
+ *v = d.d.decodeString()
+ case *bool:
+ *v = d.d.decodeBool()
+ case *int:
+ *v = int(d.d.decodeInt(intBitsize))
+ case *int8:
+ *v = int8(d.d.decodeInt(8))
+ case *int16:
+ *v = int16(d.d.decodeInt(16))
+ case *int32:
+ *v = int32(d.d.decodeInt(32))
+ case *int64:
+ *v = d.d.decodeInt(64)
+ case *uint:
+ *v = uint(d.d.decodeUint(uintBitsize))
+ case *uint8:
+ *v = uint8(d.d.decodeUint(8))
+ case *uint16:
+ *v = uint16(d.d.decodeUint(16))
+ case *uint32:
+ *v = uint32(d.d.decodeUint(32))
+ case *uint64:
+ *v = d.d.decodeUint(64)
+ case *float32:
+ *v = float32(d.d.decodeFloat(true))
+ case *float64:
+ *v = d.d.decodeFloat(false)
+ case *[]byte:
+ *v, _ = d.d.decodeBytes(*v)
+
+ case *[]interface{}:
+ d.decSliceIntf(v, valueTypeInvalid, false)
+ case *[]uint64:
+ d.decSliceUint64(v, valueTypeInvalid, false)
+ case *[]int64:
+ d.decSliceInt64(v, valueTypeInvalid, false)
+ case *[]string:
+ d.decSliceStr(v, valueTypeInvalid, false)
+ case *map[string]interface{}:
+ d.decMapStrIntf(v)
+ case *map[interface{}]interface{}:
+ d.decMapIntfIntf(v)
+ case *map[uint64]interface{}:
+ d.decMapUint64Intf(v)
+ case *map[int64]interface{}:
+ d.decMapInt64Intf(v)
+
+ case *interface{}:
+ d.decodeValue(reflect.ValueOf(iv).Elem())
+
+ default:
+ rv := reflect.ValueOf(iv)
+ d.chkPtrValue(rv)
+ d.decodeValue(rv.Elem())
+ }
+}
+
+func (d *Decoder) decodeValue(rv reflect.Value) {
+ d.d.initReadNext()
+
+ if d.d.tryDecodeAsNil() {
+ // If value in stream is nil, set the dereferenced value to its "zero" value (if settable)
+ if rv.Kind() == reflect.Ptr {
+ if !rv.IsNil() {
+ rv.Set(reflect.Zero(rv.Type()))
+ }
+ return
+ }
+ // for rv.Kind() == reflect.Ptr {
+ // rv = rv.Elem()
+ // }
+ if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid
+ rv.Set(reflect.Zero(rv.Type()))
+ }
+ return
+ }
+
+ // If the stream does not contain a nil value, we can deref to the base
+ // non-pointer value and decode into that.
+ for rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ rv.Set(reflect.New(rv.Type().Elem()))
+ }
+ rv = rv.Elem()
+ }
+
+ rt := rv.Type()
+ rtid := reflect.ValueOf(rt).Pointer()
+
+ // retrieve or register a focused decode function for this type,
+ // to eliminate the need to do the lookup multiple times
+
+ // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) }
+ var fn decFn
+ var ok bool
+ if useMapForCodecCache {
+ fn, ok = d.f[rtid]
+ } else {
+ for i, v := range d.x {
+ if v == rtid {
+ fn, ok = d.s[i], true
+ break
+ }
+ }
+ }
+ if !ok {
+ // debugf("\tCreating new dec fn for type: %v\n", rt)
+ fi := decFnInfo{ti: getTypeInfo(rtid, rt), d: d, dd: d.d}
+ fn.i = &fi
+ // An extension can be registered for any type, regardless of the Kind
+ // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc.
+ //
+ // We can't check if it's an extension byte here first, because the user may have
+ // registered a pointer or non-pointer type, meaning we may have to recurse first
+ // before matching a mapped type, even though the extension byte is already detected.
+ //
+ // NOTE: if decoding into a nil interface{}, we return a non-nil
+ // value even if the container registers a length of 0.
+ if rtid == rawExtTypId {
+ fn.f = (*decFnInfo).rawExt
+ } else if d.d.isBuiltinType(rtid) {
+ fn.f = (*decFnInfo).builtin
+ } else if xfTag, xfFn := d.h.getDecodeExt(rtid); xfFn != nil {
+ fi.xfTag, fi.xfFn = xfTag, xfFn
+ fn.f = (*decFnInfo).ext
+ } else if supportBinaryMarshal && fi.ti.unm {
+ fn.f = (*decFnInfo).binaryMarshal
+ } else {
+ switch rk := rt.Kind(); rk {
+ case reflect.String:
+ fn.f = (*decFnInfo).kString
+ case reflect.Bool:
+ fn.f = (*decFnInfo).kBool
+ case reflect.Int:
+ fn.f = (*decFnInfo).kInt
+ case reflect.Int64:
+ fn.f = (*decFnInfo).kInt64
+ case reflect.Int32:
+ fn.f = (*decFnInfo).kInt32
+ case reflect.Int8:
+ fn.f = (*decFnInfo).kInt8
+ case reflect.Int16:
+ fn.f = (*decFnInfo).kInt16
+ case reflect.Float32:
+ fn.f = (*decFnInfo).kFloat32
+ case reflect.Float64:
+ fn.f = (*decFnInfo).kFloat64
+ case reflect.Uint8:
+ fn.f = (*decFnInfo).kUint8
+ case reflect.Uint64:
+ fn.f = (*decFnInfo).kUint64
+ case reflect.Uint:
+ fn.f = (*decFnInfo).kUint
+ case reflect.Uint32:
+ fn.f = (*decFnInfo).kUint32
+ case reflect.Uint16:
+ fn.f = (*decFnInfo).kUint16
+ // case reflect.Ptr:
+ // fn.f = (*decFnInfo).kPtr
+ case reflect.Interface:
+ fn.f = (*decFnInfo).kInterface
+ case reflect.Struct:
+ fn.f = (*decFnInfo).kStruct
+ case reflect.Slice:
+ fn.f = (*decFnInfo).kSlice
+ case reflect.Array:
+ fi.array = true
+ fn.f = (*decFnInfo).kArray
+ case reflect.Map:
+ fn.f = (*decFnInfo).kMap
+ default:
+ fn.f = (*decFnInfo).kErr
+ }
+ }
+ if useMapForCodecCache {
+ if d.f == nil {
+ d.f = make(map[uintptr]decFn, 16)
+ }
+ d.f[rtid] = fn
+ } else {
+ d.s = append(d.s, fn)
+ d.x = append(d.x, rtid)
+ }
+ }
+
+ fn.f(fn.i, rv)
+
+ return
+}
+
+func (d *Decoder) chkPtrValue(rv reflect.Value) {
+ // We can only decode into a non-nil pointer
+ if rv.Kind() == reflect.Ptr && !rv.IsNil() {
+ return
+ }
+ if !rv.IsValid() {
+ decErr("Cannot decode into a zero (ie invalid) reflect.Value")
+ }
+ if !rv.CanInterface() {
+ decErr("Cannot decode into a value without an interface: %v", rv)
+ }
+ rvi := rv.Interface()
+ decErr("Cannot decode into non-pointer or nil pointer. Got: %v, %T, %v",
+ rv.Kind(), rvi, rvi)
+}
+
+func (d *Decoder) decEmbeddedField(rv reflect.Value, index []int) {
+ // d.decodeValue(rv.FieldByIndex(index))
+ // nil pointers may be here; so reproduce FieldByIndex logic + enhancements
+ for _, j := range index {
+ if rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ rv.Set(reflect.New(rv.Type().Elem()))
+ }
+ // If a pointer, it must be a pointer to struct (based on typeInfo contract)
+ rv = rv.Elem()
+ }
+ rv = rv.Field(j)
+ }
+ d.decodeValue(rv)
+}
+
+// --------------------------------------------------
+
+// short circuit functions for common maps and slices
+
+func (d *Decoder) decSliceIntf(v *[]interface{}, currEncodedType valueType, doNotReset bool) {
+ _, containerLenS := decContLens(d.d, currEncodedType)
+ s := *v
+ if s == nil {
+ s = make([]interface{}, containerLenS, containerLenS)
+ } else if containerLenS > cap(s) {
+ if doNotReset {
+ decErr(msgDecCannotExpandArr, cap(s), containerLenS)
+ }
+ s = make([]interface{}, containerLenS, containerLenS)
+ copy(s, *v)
+ } else if containerLenS > len(s) {
+ s = s[:containerLenS]
+ }
+ for j := 0; j < containerLenS; j++ {
+ d.decode(&s[j])
+ }
+ *v = s
+}
+
+func (d *Decoder) decSliceInt64(v *[]int64, currEncodedType valueType, doNotReset bool) {
+ _, containerLenS := decContLens(d.d, currEncodedType)
+ s := *v
+ if s == nil {
+ s = make([]int64, containerLenS, containerLenS)
+ } else if containerLenS > cap(s) {
+ if doNotReset {
+ decErr(msgDecCannotExpandArr, cap(s), containerLenS)
+ }
+ s = make([]int64, containerLenS, containerLenS)
+ copy(s, *v)
+ } else if containerLenS > len(s) {
+ s = s[:containerLenS]
+ }
+ for j := 0; j < containerLenS; j++ {
+ // d.decode(&s[j])
+ d.d.initReadNext()
+ s[j] = d.d.decodeInt(intBitsize)
+ }
+ *v = s
+}
+
+func (d *Decoder) decSliceUint64(v *[]uint64, currEncodedType valueType, doNotReset bool) {
+ _, containerLenS := decContLens(d.d, currEncodedType)
+ s := *v
+ if s == nil {
+ s = make([]uint64, containerLenS, containerLenS)
+ } else if containerLenS > cap(s) {
+ if doNotReset {
+ decErr(msgDecCannotExpandArr, cap(s), containerLenS)
+ }
+ s = make([]uint64, containerLenS, containerLenS)
+ copy(s, *v)
+ } else if containerLenS > len(s) {
+ s = s[:containerLenS]
+ }
+ for j := 0; j < containerLenS; j++ {
+ // d.decode(&s[j])
+ d.d.initReadNext()
+ s[j] = d.d.decodeUint(intBitsize)
+ }
+ *v = s
+}
+
+func (d *Decoder) decSliceStr(v *[]string, currEncodedType valueType, doNotReset bool) {
+ _, containerLenS := decContLens(d.d, currEncodedType)
+ s := *v
+ if s == nil {
+ s = make([]string, containerLenS, containerLenS)
+ } else if containerLenS > cap(s) {
+ if doNotReset {
+ decErr(msgDecCannotExpandArr, cap(s), containerLenS)
+ }
+ s = make([]string, containerLenS, containerLenS)
+ copy(s, *v)
+ } else if containerLenS > len(s) {
+ s = s[:containerLenS]
+ }
+ for j := 0; j < containerLenS; j++ {
+ // d.decode(&s[j])
+ d.d.initReadNext()
+ s[j] = d.d.decodeString()
+ }
+ *v = s
+}
+
+func (d *Decoder) decMapIntfIntf(v *map[interface{}]interface{}) {
+ containerLen := d.d.readMapLen()
+ m := *v
+ if m == nil {
+ m = make(map[interface{}]interface{}, containerLen)
+ *v = m
+ }
+ for j := 0; j < containerLen; j++ {
+ var mk interface{}
+ d.decode(&mk)
+ // special case if a byte array.
+ if bv, bok := mk.([]byte); bok {
+ mk = string(bv)
+ }
+ mv := m[mk]
+ d.decode(&mv)
+ m[mk] = mv
+ }
+}
+
+func (d *Decoder) decMapInt64Intf(v *map[int64]interface{}) {
+ containerLen := d.d.readMapLen()
+ m := *v
+ if m == nil {
+ m = make(map[int64]interface{}, containerLen)
+ *v = m
+ }
+ for j := 0; j < containerLen; j++ {
+ d.d.initReadNext()
+ mk := d.d.decodeInt(intBitsize)
+ mv := m[mk]
+ d.decode(&mv)
+ m[mk] = mv
+ }
+}
+
+func (d *Decoder) decMapUint64Intf(v *map[uint64]interface{}) {
+ containerLen := d.d.readMapLen()
+ m := *v
+ if m == nil {
+ m = make(map[uint64]interface{}, containerLen)
+ *v = m
+ }
+ for j := 0; j < containerLen; j++ {
+ d.d.initReadNext()
+ mk := d.d.decodeUint(intBitsize)
+ mv := m[mk]
+ d.decode(&mv)
+ m[mk] = mv
+ }
+}
+
+func (d *Decoder) decMapStrIntf(v *map[string]interface{}) {
+ containerLen := d.d.readMapLen()
+ m := *v
+ if m == nil {
+ m = make(map[string]interface{}, containerLen)
+ *v = m
+ }
+ for j := 0; j < containerLen; j++ {
+ d.d.initReadNext()
+ mk := d.d.decodeString()
+ mv := m[mk]
+ d.decode(&mv)
+ m[mk] = mv
+ }
+}
+
+// ----------------------------------------
+
+func decContLens(dd decDriver, currEncodedType valueType) (containerLen, containerLenS int) {
+ if currEncodedType == valueTypeInvalid {
+ currEncodedType = dd.currentEncodedType()
+ }
+ switch currEncodedType {
+ case valueTypeArray:
+ containerLen = dd.readArrayLen()
+ containerLenS = containerLen
+ case valueTypeMap:
+ containerLen = dd.readMapLen()
+ containerLenS = containerLen * 2
+ default:
+ decErr("Only encoded map or array can be decoded into a slice. (valueType: %0x)",
+ currEncodedType)
+ }
+ return
+}
+
+func decErr(format string, params ...interface{}) {
+ doPanic(msgTagDec, format, params...)
+}
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/encode.go b/vendor/github.com/hashicorp/go-msgpack/codec/encode.go
new file mode 100644
index 000000000..4914be0c7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/encode.go
@@ -0,0 +1,1001 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+import (
+ "io"
+ "reflect"
+)
+
+const (
+ // Some tagging information for error messages.
+ msgTagEnc = "codec.encoder"
+ defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024
+ // maxTimeSecs32 = math.MaxInt32 / 60 / 24 / 366
+)
+
+// AsSymbolFlag defines what should be encoded as symbols.
+type AsSymbolFlag uint8
+
+const (
+ // AsSymbolDefault is default.
+ // Currently, this means only encode struct field names as symbols.
+ // The default is subject to change.
+ AsSymbolDefault AsSymbolFlag = iota
+
+ // AsSymbolAll means encode anything which could be a symbol as a symbol.
+ AsSymbolAll = 0xfe
+
+ // AsSymbolNone means do not encode anything as a symbol.
+ AsSymbolNone = 1 << iota
+
+ // AsSymbolMapStringKeysFlag means encode keys in map[string]XXX as symbols.
+ AsSymbolMapStringKeysFlag
+
+ // AsSymbolStructFieldNameFlag means encode struct field names as symbols.
+ AsSymbolStructFieldNameFlag
+)
+
+// encWriter abstracts writing to a byte array or to an io.Writer.
+type encWriter interface {
+ writeUint16(uint16)
+ writeUint32(uint32)
+ writeUint64(uint64)
+ writeb([]byte)
+ writestr(string)
+ writen1(byte)
+ writen2(byte, byte)
+ atEndOfEncode()
+}
+
+// encDriver abstracts the actual codec (binc vs msgpack, etc)
+type encDriver interface {
+ isBuiltinType(rt uintptr) bool
+ encodeBuiltin(rt uintptr, v interface{})
+ encodeNil()
+ encodeInt(i int64)
+ encodeUint(i uint64)
+ encodeBool(b bool)
+ encodeFloat32(f float32)
+ encodeFloat64(f float64)
+ encodeExtPreamble(xtag byte, length int)
+ encodeArrayPreamble(length int)
+ encodeMapPreamble(length int)
+ encodeString(c charEncoding, v string)
+ encodeSymbol(v string)
+ encodeStringBytes(c charEncoding, v []byte)
+ //TODO
+ //encBignum(f *big.Int)
+ //encStringRunes(c charEncoding, v []rune)
+}
+
+type ioEncWriterWriter interface {
+ WriteByte(c byte) error
+ WriteString(s string) (n int, err error)
+ Write(p []byte) (n int, err error)
+}
+
+type ioEncStringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+type EncodeOptions struct {
+ // Encode a struct as an array, and not as a map.
+ StructToArray bool
+
+ // AsSymbols defines what should be encoded as symbols.
+ //
+ // Encoding as symbols can reduce the encoded size significantly.
+ //
+ // However, during encoding, each string to be encoded as a symbol must
+ // be checked to see if it has been seen before. Consequently, encoding time
+ // will increase when using symbols, because string comparisons have a clear cost.
+ //
+ // Sample values:
+ // AsSymbolNone
+ // AsSymbolAll
+ //   AsSymbolMapStringKeysFlag
+ // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag
+ AsSymbols AsSymbolFlag
+}
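A minimal configuration sketch for these options, assuming MsgpackHandle from elsewhere in this library; because EncodeOptions is embedded in BasicHandle, its fields are set directly on the concrete Handle before first use.

```go
package example // illustrative package name for this sketch

import "github.com/hashicorp/go-msgpack/codec"

// newHandle returns a Handle that encodes structs as arrays and uses symbols
// for both map string keys and struct field names.
func newHandle() *codec.MsgpackHandle {
	var mh codec.MsgpackHandle
	mh.StructToArray = true
	mh.AsSymbols = codec.AsSymbolMapStringKeysFlag | codec.AsSymbolStructFieldNameFlag
	return &mh
}
```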
+
+// ---------------------------------------------
+
+type simpleIoEncWriterWriter struct {
+ w io.Writer
+ bw io.ByteWriter
+ sw ioEncStringWriter
+}
+
+func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) {
+ if o.bw != nil {
+ return o.bw.WriteByte(c)
+ }
+ _, err = o.w.Write([]byte{c})
+ return
+}
+
+func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) {
+ if o.sw != nil {
+ return o.sw.WriteString(s)
+ }
+ return o.w.Write([]byte(s))
+}
+
+func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) {
+ return o.w.Write(p)
+}
+
+// ----------------------------------------
+
+// ioEncWriter implements encWriter and can write to an io.Writer implementation
+type ioEncWriter struct {
+ w ioEncWriterWriter
+ x [8]byte // temp byte array re-used internally for efficiency
+}
+
+func (z *ioEncWriter) writeUint16(v uint16) {
+ bigen.PutUint16(z.x[:2], v)
+ z.writeb(z.x[:2])
+}
+
+func (z *ioEncWriter) writeUint32(v uint32) {
+ bigen.PutUint32(z.x[:4], v)
+ z.writeb(z.x[:4])
+}
+
+func (z *ioEncWriter) writeUint64(v uint64) {
+ bigen.PutUint64(z.x[:8], v)
+ z.writeb(z.x[:8])
+}
+
+func (z *ioEncWriter) writeb(bs []byte) {
+ if len(bs) == 0 {
+ return
+ }
+ n, err := z.w.Write(bs)
+ if err != nil {
+ panic(err)
+ }
+ if n != len(bs) {
+ encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n)
+ }
+}
+
+func (z *ioEncWriter) writestr(s string) {
+ n, err := z.w.WriteString(s)
+ if err != nil {
+ panic(err)
+ }
+ if n != len(s) {
+ encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(s), n)
+ }
+}
+
+func (z *ioEncWriter) writen1(b byte) {
+ if err := z.w.WriteByte(b); err != nil {
+ panic(err)
+ }
+}
+
+func (z *ioEncWriter) writen2(b1 byte, b2 byte) {
+ z.writen1(b1)
+ z.writen1(b2)
+}
+
+func (z *ioEncWriter) atEndOfEncode() {}
+
+// ----------------------------------------
+
+// bytesEncWriter implements encWriter and can write to a byte slice.
+// It is used when encoding directly into a byte slice (see NewEncoderBytes).
+type bytesEncWriter struct {
+ b []byte
+ c int // cursor
+ out *[]byte // write out on atEndOfEncode
+}
+
+func (z *bytesEncWriter) writeUint16(v uint16) {
+ c := z.grow(2)
+ z.b[c] = byte(v >> 8)
+ z.b[c+1] = byte(v)
+}
+
+func (z *bytesEncWriter) writeUint32(v uint32) {
+ c := z.grow(4)
+ z.b[c] = byte(v >> 24)
+ z.b[c+1] = byte(v >> 16)
+ z.b[c+2] = byte(v >> 8)
+ z.b[c+3] = byte(v)
+}
+
+func (z *bytesEncWriter) writeUint64(v uint64) {
+ c := z.grow(8)
+ z.b[c] = byte(v >> 56)
+ z.b[c+1] = byte(v >> 48)
+ z.b[c+2] = byte(v >> 40)
+ z.b[c+3] = byte(v >> 32)
+ z.b[c+4] = byte(v >> 24)
+ z.b[c+5] = byte(v >> 16)
+ z.b[c+6] = byte(v >> 8)
+ z.b[c+7] = byte(v)
+}
+
+func (z *bytesEncWriter) writeb(s []byte) {
+ if len(s) == 0 {
+ return
+ }
+ c := z.grow(len(s))
+ copy(z.b[c:], s)
+}
+
+func (z *bytesEncWriter) writestr(s string) {
+ c := z.grow(len(s))
+ copy(z.b[c:], s)
+}
+
+func (z *bytesEncWriter) writen1(b1 byte) {
+ c := z.grow(1)
+ z.b[c] = b1
+}
+
+func (z *bytesEncWriter) writen2(b1 byte, b2 byte) {
+ c := z.grow(2)
+ z.b[c] = b1
+ z.b[c+1] = b2
+}
+
+func (z *bytesEncWriter) atEndOfEncode() {
+ *(z.out) = z.b[:z.c]
+}
+
+func (z *bytesEncWriter) grow(n int) (oldcursor int) {
+ oldcursor = z.c
+ z.c = oldcursor + n
+ if z.c > cap(z.b) {
+ // Tried using appendslice logic: (if cap < 1024, *2, else *1.25).
+ // However, it was too expensive, causing too many iterations of copy.
+ // Using bytes.Buffer model was much better (2*cap + n)
+ bs := make([]byte, 2*cap(z.b)+n)
+ copy(bs, z.b[:oldcursor])
+ z.b = bs
+ } else if z.c > len(z.b) {
+ z.b = z.b[:cap(z.b)]
+ }
+ return
+}
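To make the growth model in the comment above concrete, here is a throwaway sketch (not part of the codec) that prints the capacity after each reallocation under the 2*cap + n rule, starting from the default 64-byte buffer and assuming each write needs 100 bytes more than currently fit.

```go
package main

import "fmt"

func main() {
	capacity := 64 // defEncByteBufSize (1 << 6)
	for i := 0; i < 5; i++ {
		n := 100 // hypothetical shortfall on each write
		capacity = 2*capacity + n
		fmt.Println(capacity) // 228, 556, 1212, 2524, 5148
	}
}
```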
+
+// ---------------------------------------------
+
+type encFnInfo struct {
+ ti *typeInfo
+ e *Encoder
+ ee encDriver
+ xfFn func(reflect.Value) ([]byte, error)
+ xfTag byte
+}
+
+func (f *encFnInfo) builtin(rv reflect.Value) {
+ f.ee.encodeBuiltin(f.ti.rtid, rv.Interface())
+}
+
+func (f *encFnInfo) rawExt(rv reflect.Value) {
+ f.e.encRawExt(rv.Interface().(RawExt))
+}
+
+func (f *encFnInfo) ext(rv reflect.Value) {
+ bs, fnerr := f.xfFn(rv)
+ if fnerr != nil {
+ panic(fnerr)
+ }
+ if bs == nil {
+ f.ee.encodeNil()
+ return
+ }
+ if f.e.hh.writeExt() {
+ f.ee.encodeExtPreamble(f.xfTag, len(bs))
+ f.e.w.writeb(bs)
+ } else {
+ f.ee.encodeStringBytes(c_RAW, bs)
+ }
+
+}
+
+func (f *encFnInfo) binaryMarshal(rv reflect.Value) {
+ var bm binaryMarshaler
+ if f.ti.mIndir == 0 {
+ bm = rv.Interface().(binaryMarshaler)
+ } else if f.ti.mIndir == -1 {
+ bm = rv.Addr().Interface().(binaryMarshaler)
+ } else {
+ for j, k := int8(0), f.ti.mIndir; j < k; j++ {
+ if rv.IsNil() {
+ f.ee.encodeNil()
+ return
+ }
+ rv = rv.Elem()
+ }
+ bm = rv.Interface().(binaryMarshaler)
+ }
+ // debugf(">>>> binaryMarshaler: %T", rv.Interface())
+ bs, fnerr := bm.MarshalBinary()
+ if fnerr != nil {
+ panic(fnerr)
+ }
+ if bs == nil {
+ f.ee.encodeNil()
+ } else {
+ f.ee.encodeStringBytes(c_RAW, bs)
+ }
+}
+
+func (f *encFnInfo) kBool(rv reflect.Value) {
+ f.ee.encodeBool(rv.Bool())
+}
+
+func (f *encFnInfo) kString(rv reflect.Value) {
+ f.ee.encodeString(c_UTF8, rv.String())
+}
+
+func (f *encFnInfo) kFloat64(rv reflect.Value) {
+ f.ee.encodeFloat64(rv.Float())
+}
+
+func (f *encFnInfo) kFloat32(rv reflect.Value) {
+ f.ee.encodeFloat32(float32(rv.Float()))
+}
+
+func (f *encFnInfo) kInt(rv reflect.Value) {
+ f.ee.encodeInt(rv.Int())
+}
+
+func (f *encFnInfo) kUint(rv reflect.Value) {
+ f.ee.encodeUint(rv.Uint())
+}
+
+func (f *encFnInfo) kInvalid(rv reflect.Value) {
+ f.ee.encodeNil()
+}
+
+func (f *encFnInfo) kErr(rv reflect.Value) {
+ encErr("Unsupported kind: %s, for: %#v", rv.Kind(), rv)
+}
+
+func (f *encFnInfo) kSlice(rv reflect.Value) {
+ if rv.IsNil() {
+ f.ee.encodeNil()
+ return
+ }
+
+ if shortCircuitReflectToFastPath {
+ switch f.ti.rtid {
+ case intfSliceTypId:
+ f.e.encSliceIntf(rv.Interface().([]interface{}))
+ return
+ case strSliceTypId:
+ f.e.encSliceStr(rv.Interface().([]string))
+ return
+ case uint64SliceTypId:
+ f.e.encSliceUint64(rv.Interface().([]uint64))
+ return
+ case int64SliceTypId:
+ f.e.encSliceInt64(rv.Interface().([]int64))
+ return
+ }
+ }
+
+ // If in this method, then there was no extension function defined.
+ // So it's okay to treat as []byte.
+ if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 {
+ f.ee.encodeStringBytes(c_RAW, rv.Bytes())
+ return
+ }
+
+ l := rv.Len()
+ if f.ti.mbs {
+ if l%2 == 1 {
+ encErr("mapBySlice: invalid length (must be divisible by 2): %v", l)
+ }
+ f.ee.encodeMapPreamble(l / 2)
+ } else {
+ f.ee.encodeArrayPreamble(l)
+ }
+ if l == 0 {
+ return
+ }
+ for j := 0; j < l; j++ {
+ // TODO: Consider perf implication of encoding odd index values as symbols if type is string
+ f.e.encodeValue(rv.Index(j))
+ }
+}
+
+func (f *encFnInfo) kArray(rv reflect.Value) {
+ // We cannot share kSlice method, because the array may be non-addressable.
+// E.g. type S struct{ B [2]byte }; Encode(S{}) will bomb with "panic: slice of unaddressable array".
+ // So we have to duplicate the functionality here.
+ // f.e.encodeValue(rv.Slice(0, rv.Len()))
+ // f.kSlice(rv.Slice(0, rv.Len()))
+
+ l := rv.Len()
+ // Handle an array of bytes specially (in line with what is done for slices)
+ if f.ti.rt.Elem().Kind() == reflect.Uint8 {
+ if l == 0 {
+ f.ee.encodeStringBytes(c_RAW, nil)
+ return
+ }
+ var bs []byte
+ if rv.CanAddr() {
+ bs = rv.Slice(0, l).Bytes()
+ } else {
+ bs = make([]byte, l)
+ for i := 0; i < l; i++ {
+ bs[i] = byte(rv.Index(i).Uint())
+ }
+ }
+ f.ee.encodeStringBytes(c_RAW, bs)
+ return
+ }
+
+ if f.ti.mbs {
+ if l%2 == 1 {
+ encErr("mapBySlice: invalid length (must be divisible by 2): %v", l)
+ }
+ f.ee.encodeMapPreamble(l / 2)
+ } else {
+ f.ee.encodeArrayPreamble(l)
+ }
+ if l == 0 {
+ return
+ }
+ for j := 0; j < l; j++ {
+ // TODO: Consider perf implication of encoding odd index values as symbols if type is string
+ f.e.encodeValue(rv.Index(j))
+ }
+}
+
+func (f *encFnInfo) kStruct(rv reflect.Value) {
+ fti := f.ti
+ newlen := len(fti.sfi)
+ rvals := make([]reflect.Value, newlen)
+ var encnames []string
+ e := f.e
+ tisfi := fti.sfip
+ toMap := !(fti.toArray || e.h.StructToArray)
+ // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct)
+ if toMap {
+ tisfi = fti.sfi
+ encnames = make([]string, newlen)
+ }
+ newlen = 0
+ for _, si := range tisfi {
+ if si.i != -1 {
+ rvals[newlen] = rv.Field(int(si.i))
+ } else {
+ rvals[newlen] = rv.FieldByIndex(si.is)
+ }
+ if toMap {
+ if si.omitEmpty && isEmptyValue(rvals[newlen]) {
+ continue
+ }
+ encnames[newlen] = si.encName
+ } else {
+ if si.omitEmpty && isEmptyValue(rvals[newlen]) {
+ rvals[newlen] = reflect.Value{} //encode as nil
+ }
+ }
+ newlen++
+ }
+
+ // debugf(">>>> kStruct: newlen: %v", newlen)
+ if toMap {
+ ee := f.ee // don't dereference every time
+ ee.encodeMapPreamble(newlen)
+ // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
+ asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
+ for j := 0; j < newlen; j++ {
+ if asSymbols {
+ ee.encodeSymbol(encnames[j])
+ } else {
+ ee.encodeString(c_UTF8, encnames[j])
+ }
+ e.encodeValue(rvals[j])
+ }
+ } else {
+ f.ee.encodeArrayPreamble(newlen)
+ for j := 0; j < newlen; j++ {
+ e.encodeValue(rvals[j])
+ }
+ }
+}
+
+// func (f *encFnInfo) kPtr(rv reflect.Value) {
+// debugf(">>>>>>> ??? encode kPtr called - shouldn't get called")
+// if rv.IsNil() {
+// f.ee.encodeNil()
+// return
+// }
+// f.e.encodeValue(rv.Elem())
+// }
+
+func (f *encFnInfo) kInterface(rv reflect.Value) {
+ if rv.IsNil() {
+ f.ee.encodeNil()
+ return
+ }
+ f.e.encodeValue(rv.Elem())
+}
+
+func (f *encFnInfo) kMap(rv reflect.Value) {
+ if rv.IsNil() {
+ f.ee.encodeNil()
+ return
+ }
+
+ if shortCircuitReflectToFastPath {
+ switch f.ti.rtid {
+ case mapIntfIntfTypId:
+ f.e.encMapIntfIntf(rv.Interface().(map[interface{}]interface{}))
+ return
+ case mapStrIntfTypId:
+ f.e.encMapStrIntf(rv.Interface().(map[string]interface{}))
+ return
+ case mapStrStrTypId:
+ f.e.encMapStrStr(rv.Interface().(map[string]string))
+ return
+ case mapInt64IntfTypId:
+ f.e.encMapInt64Intf(rv.Interface().(map[int64]interface{}))
+ return
+ case mapUint64IntfTypId:
+ f.e.encMapUint64Intf(rv.Interface().(map[uint64]interface{}))
+ return
+ }
+ }
+
+ l := rv.Len()
+ f.ee.encodeMapPreamble(l)
+ if l == 0 {
+ return
+ }
+ // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String
+ keyTypeIsString := f.ti.rt.Key() == stringTyp
+ var asSymbols bool
+ if keyTypeIsString {
+ asSymbols = f.e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ }
+ mks := rv.MapKeys()
+ // for j, lmks := 0, len(mks); j < lmks; j++ {
+ for j := range mks {
+ if keyTypeIsString {
+ if asSymbols {
+ f.ee.encodeSymbol(mks[j].String())
+ } else {
+ f.ee.encodeString(c_UTF8, mks[j].String())
+ }
+ } else {
+ f.e.encodeValue(mks[j])
+ }
+ f.e.encodeValue(rv.MapIndex(mks[j]))
+ }
+
+}
+
+// --------------------------------------------------
+
+// encFn encapsulates the captured variables and the encode function.
+// This way, we only do some calculations once, and pass control to the
+// code block that should be called (encapsulated in a function)
+// instead of executing the checks every time.
+type encFn struct {
+ i *encFnInfo
+ f func(*encFnInfo, reflect.Value)
+}
+
+// --------------------------------------------------
+
+// An Encoder writes an object to an output stream in the codec format.
+type Encoder struct {
+ w encWriter
+ e encDriver
+ h *BasicHandle
+ hh Handle
+ f map[uintptr]encFn
+ x []uintptr
+ s []encFn
+}
+
+// NewEncoder returns an Encoder for encoding into an io.Writer.
+//
+// For efficiency, users are encouraged to pass in a memory buffered writer
+// (eg bufio.Writer, bytes.Buffer).
+func NewEncoder(w io.Writer, h Handle) *Encoder {
+ ww, ok := w.(ioEncWriterWriter)
+ if !ok {
+ sww := simpleIoEncWriterWriter{w: w}
+ sww.bw, _ = w.(io.ByteWriter)
+ sww.sw, _ = w.(ioEncStringWriter)
+ ww = &sww
+ //ww = bufio.NewWriterSize(w, defEncByteBufSize)
+ }
+ z := ioEncWriter{
+ w: ww,
+ }
+ return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)}
+}
+
+// NewEncoderBytes returns an encoder for encoding directly and efficiently
+// into a byte slice, using zero-copying to temporary slices.
+//
+// It will potentially replace the output byte slice pointed to.
+// After encoding, the out parameter contains the encoded contents.
+func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
+ in := *out
+ if in == nil {
+ in = make([]byte, defEncByteBufSize)
+ }
+ z := bytesEncWriter{
+ b: in,
+ out: out,
+ }
+ return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)}
+}
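A small usage sketch for NewEncoderBytes (assumptions as in the earlier sketches): the caller supplies a pointer to a scratch slice, and after Encode returns the slice has been re-pointed at the encoded bytes, possibly in newly allocated backing storage.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	var mh codec.MsgpackHandle

	// A small scratch buffer; it is grown (and possibly replaced) as needed.
	buf := make([]byte, 0, 16)
	if err := codec.NewEncoderBytes(&buf, &mh).Encode([]string{"alpha", "beta", "gamma"}); err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes\n", len(buf))
}
```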
+
+// Encode writes an object into a stream in the codec format.
+//
+// Encoding can be configured via the "codec" struct tag for the fields.
+//
+// The "codec" key in struct field's tag value is the key name,
+// followed by an optional comma and options.
+//
+// To set an option on all fields (e.g. omitempty on all fields), you
+// can create a field called _struct, and set flags on it.
+//
+// Struct values "usually" encode as maps. Each exported struct field is encoded unless:
+// - the field's codec tag is "-", OR
+// - the field is empty and its codec tag specifies the "omitempty" option.
+//
+// When encoding as a map, the first string in the tag (before the comma)
+// is the map key string to use when encoding.
+//
+// However, struct values may encode as arrays. This happens when:
+// - StructToArray Encode option is set, OR
+// - the codec tag on the _struct field sets the "toarray" option
+//
+// Values with types that implement MapBySlice are encoded as stream maps.
+//
+// The empty values (for omitempty option) are false, 0, any nil pointer
+// or interface value, and any array, slice, map, or string of length zero.
+//
+// Anonymous fields are encoded inline if no struct tag is present.
+// Else they are encoded as regular fields.
+//
+// Examples:
+//
+// type MyStruct struct {
+// _struct bool `codec:",omitempty"` //set omitempty for every field
+// Field1 string `codec:"-"` //skip this field
+// Field2 int `codec:"myName"` //Use key "myName" in encode stream
+// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty.
+// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty.
+// ...
+// }
+//
+// type MyStruct struct {
+// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field
+// //and encode struct as an array
+// }
+//
+// The mode of encoding is based on the type of the value. When a value is seen:
+// - If an extension is registered for it, call that extension function
+// - If it implements BinaryMarshaler, call its MarshalBinary() (data []byte, err error)
+// - Else encode it based on its reflect.Kind
+//
+// Note that struct field names and keys in map[string]XXX will be treated as symbols.
+// Some formats support symbols (e.g. binc) and will properly encode the string
+// only once in the stream, and use a tag to refer to it thereafter.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer panicToErr(&err)
+ e.encode(v)
+ e.w.atEndOfEncode()
+ return
+}
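The struct-tag behaviour documented above can be exercised with a short round trip; the type below is purely illustrative and uses the documented tag forms (renamed key, omitempty, and "-" to skip a field). Assumptions as in the earlier sketches.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

type account struct {
	ID     string `codec:"id"`              // encoded under the key "id"
	Email  string `codec:"email,omitempty"` // omitted from the stream when empty
	Hidden string `codec:"-"`               // never encoded
}

func main() {
	var mh codec.MsgpackHandle

	var bs []byte
	in := account{ID: "abc123", Hidden: "not on the wire"}
	if err := codec.NewEncoderBytes(&bs, &mh).Encode(in); err != nil {
		panic(err)
	}

	var out account
	if err := codec.NewDecoderBytes(bs, &mh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {ID:abc123 Email: Hidden:}
}
```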
+
+func (e *Encoder) encode(iv interface{}) {
+ switch v := iv.(type) {
+ case nil:
+ e.e.encodeNil()
+
+ case reflect.Value:
+ e.encodeValue(v)
+
+ case string:
+ e.e.encodeString(c_UTF8, v)
+ case bool:
+ e.e.encodeBool(v)
+ case int:
+ e.e.encodeInt(int64(v))
+ case int8:
+ e.e.encodeInt(int64(v))
+ case int16:
+ e.e.encodeInt(int64(v))
+ case int32:
+ e.e.encodeInt(int64(v))
+ case int64:
+ e.e.encodeInt(v)
+ case uint:
+ e.e.encodeUint(uint64(v))
+ case uint8:
+ e.e.encodeUint(uint64(v))
+ case uint16:
+ e.e.encodeUint(uint64(v))
+ case uint32:
+ e.e.encodeUint(uint64(v))
+ case uint64:
+ e.e.encodeUint(v)
+ case float32:
+ e.e.encodeFloat32(v)
+ case float64:
+ e.e.encodeFloat64(v)
+
+ case []interface{}:
+ e.encSliceIntf(v)
+ case []string:
+ e.encSliceStr(v)
+ case []int64:
+ e.encSliceInt64(v)
+ case []uint64:
+ e.encSliceUint64(v)
+ case []uint8:
+ e.e.encodeStringBytes(c_RAW, v)
+
+ case map[interface{}]interface{}:
+ e.encMapIntfIntf(v)
+ case map[string]interface{}:
+ e.encMapStrIntf(v)
+ case map[string]string:
+ e.encMapStrStr(v)
+ case map[int64]interface{}:
+ e.encMapInt64Intf(v)
+ case map[uint64]interface{}:
+ e.encMapUint64Intf(v)
+
+ case *string:
+ e.e.encodeString(c_UTF8, *v)
+ case *bool:
+ e.e.encodeBool(*v)
+ case *int:
+ e.e.encodeInt(int64(*v))
+ case *int8:
+ e.e.encodeInt(int64(*v))
+ case *int16:
+ e.e.encodeInt(int64(*v))
+ case *int32:
+ e.e.encodeInt(int64(*v))
+ case *int64:
+ e.e.encodeInt(*v)
+ case *uint:
+ e.e.encodeUint(uint64(*v))
+ case *uint8:
+ e.e.encodeUint(uint64(*v))
+ case *uint16:
+ e.e.encodeUint(uint64(*v))
+ case *uint32:
+ e.e.encodeUint(uint64(*v))
+ case *uint64:
+ e.e.encodeUint(*v)
+ case *float32:
+ e.e.encodeFloat32(*v)
+ case *float64:
+ e.e.encodeFloat64(*v)
+
+ case *[]interface{}:
+ e.encSliceIntf(*v)
+ case *[]string:
+ e.encSliceStr(*v)
+ case *[]int64:
+ e.encSliceInt64(*v)
+ case *[]uint64:
+ e.encSliceUint64(*v)
+ case *[]uint8:
+ e.e.encodeStringBytes(c_RAW, *v)
+
+ case *map[interface{}]interface{}:
+ e.encMapIntfIntf(*v)
+ case *map[string]interface{}:
+ e.encMapStrIntf(*v)
+ case *map[string]string:
+ e.encMapStrStr(*v)
+ case *map[int64]interface{}:
+ e.encMapInt64Intf(*v)
+ case *map[uint64]interface{}:
+ e.encMapUint64Intf(*v)
+
+ default:
+ e.encodeValue(reflect.ValueOf(iv))
+ }
+}
+
+func (e *Encoder) encodeValue(rv reflect.Value) {
+ for rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ e.e.encodeNil()
+ return
+ }
+ rv = rv.Elem()
+ }
+
+ rt := rv.Type()
+ rtid := reflect.ValueOf(rt).Pointer()
+
+ // if e.f == nil && e.s == nil { debugf("---->Creating new enc f map for type: %v\n", rt) }
+ var fn encFn
+ var ok bool
+ if useMapForCodecCache {
+ fn, ok = e.f[rtid]
+ } else {
+ for i, v := range e.x {
+ if v == rtid {
+ fn, ok = e.s[i], true
+ break
+ }
+ }
+ }
+ if !ok {
+ // debugf("\tCreating new enc fn for type: %v\n", rt)
+ fi := encFnInfo{ti: getTypeInfo(rtid, rt), e: e, ee: e.e}
+ fn.i = &fi
+ if rtid == rawExtTypId {
+ fn.f = (*encFnInfo).rawExt
+ } else if e.e.isBuiltinType(rtid) {
+ fn.f = (*encFnInfo).builtin
+ } else if xfTag, xfFn := e.h.getEncodeExt(rtid); xfFn != nil {
+ fi.xfTag, fi.xfFn = xfTag, xfFn
+ fn.f = (*encFnInfo).ext
+ } else if supportBinaryMarshal && fi.ti.m {
+ fn.f = (*encFnInfo).binaryMarshal
+ } else {
+ switch rk := rt.Kind(); rk {
+ case reflect.Bool:
+ fn.f = (*encFnInfo).kBool
+ case reflect.String:
+ fn.f = (*encFnInfo).kString
+ case reflect.Float64:
+ fn.f = (*encFnInfo).kFloat64
+ case reflect.Float32:
+ fn.f = (*encFnInfo).kFloat32
+ case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16:
+ fn.f = (*encFnInfo).kInt
+ case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16:
+ fn.f = (*encFnInfo).kUint
+ case reflect.Invalid:
+ fn.f = (*encFnInfo).kInvalid
+ case reflect.Slice:
+ fn.f = (*encFnInfo).kSlice
+ case reflect.Array:
+ fn.f = (*encFnInfo).kArray
+ case reflect.Struct:
+ fn.f = (*encFnInfo).kStruct
+ // case reflect.Ptr:
+ // fn.f = (*encFnInfo).kPtr
+ case reflect.Interface:
+ fn.f = (*encFnInfo).kInterface
+ case reflect.Map:
+ fn.f = (*encFnInfo).kMap
+ default:
+ fn.f = (*encFnInfo).kErr
+ }
+ }
+ if useMapForCodecCache {
+ if e.f == nil {
+ e.f = make(map[uintptr]encFn, 16)
+ }
+ e.f[rtid] = fn
+ } else {
+ e.s = append(e.s, fn)
+ e.x = append(e.x, rtid)
+ }
+ }
+
+ fn.f(fn.i, rv)
+
+}
+
+func (e *Encoder) encRawExt(re RawExt) {
+ if re.Data == nil {
+ e.e.encodeNil()
+ return
+ }
+ if e.hh.writeExt() {
+ e.e.encodeExtPreamble(re.Tag, len(re.Data))
+ e.w.writeb(re.Data)
+ } else {
+ e.e.encodeStringBytes(c_RAW, re.Data)
+ }
+}
+
+// ---------------------------------------------
+// short circuit functions for common maps and slices
+
+func (e *Encoder) encSliceIntf(v []interface{}) {
+ e.e.encodeArrayPreamble(len(v))
+ for _, v2 := range v {
+ e.encode(v2)
+ }
+}
+
+func (e *Encoder) encSliceStr(v []string) {
+ e.e.encodeArrayPreamble(len(v))
+ for _, v2 := range v {
+ e.e.encodeString(c_UTF8, v2)
+ }
+}
+
+func (e *Encoder) encSliceInt64(v []int64) {
+ e.e.encodeArrayPreamble(len(v))
+ for _, v2 := range v {
+ e.e.encodeInt(v2)
+ }
+}
+
+func (e *Encoder) encSliceUint64(v []uint64) {
+ e.e.encodeArrayPreamble(len(v))
+ for _, v2 := range v {
+ e.e.encodeUint(v2)
+ }
+}
+
+func (e *Encoder) encMapStrStr(v map[string]string) {
+ e.e.encodeMapPreamble(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ for k2, v2 := range v {
+ if asSymbols {
+ e.e.encodeSymbol(k2)
+ } else {
+ e.e.encodeString(c_UTF8, k2)
+ }
+ e.e.encodeString(c_UTF8, v2)
+ }
+}
+
+func (e *Encoder) encMapStrIntf(v map[string]interface{}) {
+ e.e.encodeMapPreamble(len(v))
+ asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+ for k2, v2 := range v {
+ if asSymbols {
+ e.e.encodeSymbol(k2)
+ } else {
+ e.e.encodeString(c_UTF8, k2)
+ }
+ e.encode(v2)
+ }
+}
+
+func (e *Encoder) encMapInt64Intf(v map[int64]interface{}) {
+ e.e.encodeMapPreamble(len(v))
+ for k2, v2 := range v {
+ e.e.encodeInt(k2)
+ e.encode(v2)
+ }
+}
+
+func (e *Encoder) encMapUint64Intf(v map[uint64]interface{}) {
+ e.e.encodeMapPreamble(len(v))
+ for k2, v2 := range v {
+ e.e.encodeUint(uint64(k2))
+ e.encode(v2)
+ }
+}
+
+func (e *Encoder) encMapIntfIntf(v map[interface{}]interface{}) {
+ e.e.encodeMapPreamble(len(v))
+ for k2, v2 := range v {
+ e.encode(k2)
+ e.encode(v2)
+ }
+}
+
+// ----------------------------------------
+
+func encErr(format string, params ...interface{}) {
+ doPanic(msgTagEnc, format, params...)
+}
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper.go
new file mode 100644
index 000000000..e6dc0563f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper.go
@@ -0,0 +1,589 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+// Contains code shared by both encode and decode.
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+const (
+ structTagName = "codec"
+
+ // Support
+ // encoding.BinaryMarshaler: MarshalBinary() (data []byte, err error)
+ // encoding.BinaryUnmarshaler: UnmarshalBinary(data []byte) error
+ // This constant flag will enable or disable it.
+ supportBinaryMarshal = true
+
+ // Each Encoder or Decoder uses a cache of functions based on conditionals,
+ // so that the conditionals are not run every time.
+ //
+ // Either a map or a slice is used to keep track of the functions.
+ // The map is more natural, but has a higher cost than a slice/array.
+ // This flag (useMapForCodecCache) controls which is used.
+ useMapForCodecCache = false
+
+ // For some common container types, we can short-circuit an elaborate
+ // reflection dance and call encode/decode directly.
+ // The currently supported types are:
+ // - slices of strings, or id's (int64,uint64) or interfaces.
+ // - maps of str->str, str->intf, id(int64,uint64)->intf, intf->intf
+ shortCircuitReflectToFastPath = true
+
+ // for debugging, set this to false, to catch panic traces.
+ // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
+ recoverPanicToErr = true
+)
+
+type charEncoding uint8
+
+const (
+ c_RAW charEncoding = iota
+ c_UTF8
+ c_UTF16LE
+ c_UTF16BE
+ c_UTF32LE
+ c_UTF32BE
+)
+
+// valueType is the stream type
+type valueType uint8
+
+const (
+ valueTypeUnset valueType = iota
+ valueTypeNil
+ valueTypeInt
+ valueTypeUint
+ valueTypeFloat
+ valueTypeBool
+ valueTypeString
+ valueTypeSymbol
+ valueTypeBytes
+ valueTypeMap
+ valueTypeArray
+ valueTypeTimestamp
+ valueTypeExt
+
+ valueTypeInvalid = 0xff
+)
+
+var (
+ bigen = binary.BigEndian
+ structInfoFieldName = "_struct"
+
+ cachedTypeInfo = make(map[uintptr]*typeInfo, 4)
+ cachedTypeInfoMutex sync.RWMutex
+
+ intfSliceTyp = reflect.TypeOf([]interface{}(nil))
+ intfTyp = intfSliceTyp.Elem()
+
+ strSliceTyp = reflect.TypeOf([]string(nil))
+ boolSliceTyp = reflect.TypeOf([]bool(nil))
+ uintSliceTyp = reflect.TypeOf([]uint(nil))
+ uint8SliceTyp = reflect.TypeOf([]uint8(nil))
+ uint16SliceTyp = reflect.TypeOf([]uint16(nil))
+ uint32SliceTyp = reflect.TypeOf([]uint32(nil))
+ uint64SliceTyp = reflect.TypeOf([]uint64(nil))
+ intSliceTyp = reflect.TypeOf([]int(nil))
+ int8SliceTyp = reflect.TypeOf([]int8(nil))
+ int16SliceTyp = reflect.TypeOf([]int16(nil))
+ int32SliceTyp = reflect.TypeOf([]int32(nil))
+ int64SliceTyp = reflect.TypeOf([]int64(nil))
+ float32SliceTyp = reflect.TypeOf([]float32(nil))
+ float64SliceTyp = reflect.TypeOf([]float64(nil))
+
+ mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
+ mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
+ mapStrStrTyp = reflect.TypeOf(map[string]string(nil))
+
+ mapIntIntfTyp = reflect.TypeOf(map[int]interface{}(nil))
+ mapInt64IntfTyp = reflect.TypeOf(map[int64]interface{}(nil))
+ mapUintIntfTyp = reflect.TypeOf(map[uint]interface{}(nil))
+ mapUint64IntfTyp = reflect.TypeOf(map[uint64]interface{}(nil))
+
+ stringTyp = reflect.TypeOf("")
+ timeTyp = reflect.TypeOf(time.Time{})
+ rawExtTyp = reflect.TypeOf(RawExt{})
+
+ mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
+ binaryMarshalerTyp = reflect.TypeOf((*binaryMarshaler)(nil)).Elem()
+ binaryUnmarshalerTyp = reflect.TypeOf((*binaryUnmarshaler)(nil)).Elem()
+
+ rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer()
+ intfTypId = reflect.ValueOf(intfTyp).Pointer()
+ timeTypId = reflect.ValueOf(timeTyp).Pointer()
+
+ intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer()
+ strSliceTypId = reflect.ValueOf(strSliceTyp).Pointer()
+
+ boolSliceTypId = reflect.ValueOf(boolSliceTyp).Pointer()
+ uintSliceTypId = reflect.ValueOf(uintSliceTyp).Pointer()
+ uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer()
+ uint16SliceTypId = reflect.ValueOf(uint16SliceTyp).Pointer()
+ uint32SliceTypId = reflect.ValueOf(uint32SliceTyp).Pointer()
+ uint64SliceTypId = reflect.ValueOf(uint64SliceTyp).Pointer()
+ intSliceTypId = reflect.ValueOf(intSliceTyp).Pointer()
+ int8SliceTypId = reflect.ValueOf(int8SliceTyp).Pointer()
+ int16SliceTypId = reflect.ValueOf(int16SliceTyp).Pointer()
+ int32SliceTypId = reflect.ValueOf(int32SliceTyp).Pointer()
+ int64SliceTypId = reflect.ValueOf(int64SliceTyp).Pointer()
+ float32SliceTypId = reflect.ValueOf(float32SliceTyp).Pointer()
+ float64SliceTypId = reflect.ValueOf(float64SliceTyp).Pointer()
+
+ mapStrStrTypId = reflect.ValueOf(mapStrStrTyp).Pointer()
+ mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer()
+ mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer()
+ mapIntIntfTypId = reflect.ValueOf(mapIntIntfTyp).Pointer()
+ mapInt64IntfTypId = reflect.ValueOf(mapInt64IntfTyp).Pointer()
+ mapUintIntfTypId = reflect.ValueOf(mapUintIntfTyp).Pointer()
+ mapUint64IntfTypId = reflect.ValueOf(mapUint64IntfTyp).Pointer()
+ // Id = reflect.ValueOf().Pointer()
+ // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer()
+
+ binaryMarshalerTypId = reflect.ValueOf(binaryMarshalerTyp).Pointer()
+ binaryUnmarshalerTypId = reflect.ValueOf(binaryUnmarshalerTyp).Pointer()
+
+ intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits())
+ uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits())
+
+ bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
+ bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+)
+
+type binaryUnmarshaler interface {
+ UnmarshalBinary(data []byte) error
+}
+
+type binaryMarshaler interface {
+ MarshalBinary() (data []byte, err error)
+}
+
+// MapBySlice represents a slice which should be encoded as a map in the stream.
+// The slice contains a sequence of key-value pairs.
+type MapBySlice interface {
+ MapBySlice()
+}
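An illustrative implementation of the MapBySlice marker: a named slice of alternating keys and values that the encoder emits as a stream map (the slice/array encode paths check this marker and write a map preamble of length l/2). Assumptions as in the earlier sketches.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

// pairs holds alternating keys and values; the marker method below makes it
// encode as a map with len(pairs)/2 entries.
type pairs []interface{}

func (pairs) MapBySlice() {}

func main() {
	var mh codec.MsgpackHandle

	var bs []byte
	if err := codec.NewEncoderBytes(&bs, &mh).Encode(pairs{"a", 1, "b", 2}); err != nil {
		panic(err)
	}

	// Decoding the same bytes into a map confirms two entries were written.
	var m map[string]int
	if err := codec.NewDecoderBytes(bs, &mh).Decode(&m); err != nil {
		panic(err)
	}
	fmt.Println(m) // map[a:1 b:2]
}
```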
+
+// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
+//
+// BasicHandle encapsulates the common options and extension functions.
+type BasicHandle struct {
+ extHandle
+ EncodeOptions
+ DecodeOptions
+}
+
+// Handle is the interface for a specific encoding format.
+//
+// Typically, a Handle is pre-configured before first time use,
+// and not modified while in use. Such a pre-configured Handle
+// is safe for concurrent access.
+type Handle interface {
+ writeExt() bool
+ getBasicHandle() *BasicHandle
+ newEncDriver(w encWriter) encDriver
+ newDecDriver(r decReader) decDriver
+}
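A sketch of the concurrency contract described above: the Handle is configured once and then shared read-only, while each goroutine creates its own Encoder. Assumptions as in the earlier sketches.

```go
package main

import (
	"sync"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	// Configure once, before first use; afterwards treat the Handle as read-only.
	var mh codec.MsgpackHandle
	mh.StructToArray = true

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			var bs []byte
			// Encoders are cheap and per-goroutine; only the Handle is shared.
			if err := codec.NewEncoderBytes(&bs, &mh).Encode(n); err != nil {
				panic(err)
			}
		}(i)
	}
	wg.Wait()
}
```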
+
+// RawExt represents raw unprocessed extension data.
+type RawExt struct {
+ Tag byte
+ Data []byte
+}
+
+type extTypeTagFn struct {
+ rtid uintptr
+ rt reflect.Type
+ tag byte
+ encFn func(reflect.Value) ([]byte, error)
+ decFn func(reflect.Value, []byte) error
+}
+
+type extHandle []*extTypeTagFn
+
+// AddExt registers an encode and decode function for a reflect.Type.
+// Note that the type must be a named type, and specifically not
+// a pointer or an interface. An error is returned if that is not honored.
+//
+// To Deregister an ext, call AddExt with 0 tag, nil encfn and nil decfn.
+func (o *extHandle) AddExt(
+ rt reflect.Type,
+ tag byte,
+ encfn func(reflect.Value) ([]byte, error),
+ decfn func(reflect.Value, []byte) error,
+) (err error) {
+ // o is a pointer, because we may need to initialize it
+ if rt.PkgPath() == "" || rt.Kind() == reflect.Interface {
+ err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T",
+ reflect.Zero(rt).Interface())
+ return
+ }
+
+ // o cannot be nil, since it is always embedded in a Handle.
+ // if nil, let it panic.
+ // if o == nil {
+ // err = errors.New("codec.Handle.AddExt: extHandle cannot be a nil pointer.")
+ // return
+ // }
+
+ rtid := reflect.ValueOf(rt).Pointer()
+ for _, v := range *o {
+ if v.rtid == rtid {
+ v.tag, v.encFn, v.decFn = tag, encfn, decfn
+ return
+ }
+ }
+
+ *o = append(*o, &extTypeTagFn{rtid, rt, tag, encfn, decfn})
+ return
+}
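A hedged sketch of registering extension functions via AddExt (promoted to the concrete Handles through the embedded extHandle). The named type and the tag byte 0x7f are illustrative choices, not values defined by this library.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"reflect"

	"github.com/hashicorp/go-msgpack/codec"
)

// unixTime is an illustrative named type; AddExt requires a named, non-pointer,
// non-interface type.
type unixTime int64

func encUnixTime(rv reflect.Value) ([]byte, error) {
	bs := make([]byte, 8)
	binary.BigEndian.PutUint64(bs, uint64(rv.Int()))
	return bs, nil
}

func decUnixTime(rv reflect.Value, bs []byte) error {
	if len(bs) != 8 {
		return fmt.Errorf("unixTime: expected 8 bytes, got %d", len(bs))
	}
	rv.SetInt(int64(binary.BigEndian.Uint64(bs)))
	return nil
}

func main() {
	var mh codec.MsgpackHandle
	// AddExt is reached through the embedded extHandle (via BasicHandle).
	if err := mh.AddExt(reflect.TypeOf(unixTime(0)), 0x7f, encUnixTime, decUnixTime); err != nil {
		panic(err)
	}

	var bs []byte
	if err := codec.NewEncoderBytes(&bs, &mh).Encode(unixTime(1136239445)); err != nil {
		panic(err)
	}

	var out unixTime
	if err := codec.NewDecoderBytes(bs, &mh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // 1136239445
}
```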
+
+func (o extHandle) getExt(rtid uintptr) *extTypeTagFn {
+ for _, v := range o {
+ if v.rtid == rtid {
+ return v
+ }
+ }
+ return nil
+}
+
+func (o extHandle) getExtForTag(tag byte) *extTypeTagFn {
+ for _, v := range o {
+ if v.tag == tag {
+ return v
+ }
+ }
+ return nil
+}
+
+func (o extHandle) getDecodeExtForTag(tag byte) (
+ rv reflect.Value, fn func(reflect.Value, []byte) error) {
+ if x := o.getExtForTag(tag); x != nil {
+ // ext is only registered for base
+ rv = reflect.New(x.rt).Elem()
+ fn = x.decFn
+ }
+ return
+}
+
+func (o extHandle) getDecodeExt(rtid uintptr) (tag byte, fn func(reflect.Value, []byte) error) {
+ if x := o.getExt(rtid); x != nil {
+ tag = x.tag
+ fn = x.decFn
+ }
+ return
+}
+
+func (o extHandle) getEncodeExt(rtid uintptr) (tag byte, fn func(reflect.Value) ([]byte, error)) {
+ if x := o.getExt(rtid); x != nil {
+ tag = x.tag
+ fn = x.encFn
+ }
+ return
+}
+
+type structFieldInfo struct {
+ encName string // encode name
+
+ // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set.
+
+ is []int // (recursive/embedded) field index in struct
+ i int16 // field index in struct
+ omitEmpty bool
+ toArray bool // if field is _struct, is the toArray set?
+
+ // tag string // tag
+ // name string // field name
+ // encNameBs []byte // encoded name as byte stream
+ // ikind int // kind of the field as an int i.e. int(reflect.Kind)
+}
+
+func parseStructFieldInfo(fname string, stag string) *structFieldInfo {
+ if fname == "" {
+ panic("parseStructFieldInfo: No Field Name")
+ }
+ si := structFieldInfo{
+ // name: fname,
+ encName: fname,
+ // tag: stag,
+ }
+
+ if stag != "" {
+ for i, s := range strings.Split(stag, ",") {
+ if i == 0 {
+ if s != "" {
+ si.encName = s
+ }
+ } else {
+ switch s {
+ case "omitempty":
+ si.omitEmpty = true
+ case "toarray":
+ si.toArray = true
+ }
+ }
+ }
+ }
+ // si.encNameBs = []byte(si.encName)
+ return &si
+}
+
+type sfiSortedByEncName []*structFieldInfo
+
+func (p sfiSortedByEncName) Len() int {
+ return len(p)
+}
+
+func (p sfiSortedByEncName) Less(i, j int) bool {
+ return p[i].encName < p[j].encName
+}
+
+func (p sfiSortedByEncName) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+// typeInfo keeps information about each type referenced in the encode/decode sequence.
+//
+// During an encode/decode sequence, we work as below:
+// - If base is a built-in type, en/decode base value
+// - If base is registered as an extension, en/decode base value
+// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
+// - Else decode appropriately based on the reflect.Kind
+type typeInfo struct {
+ sfi []*structFieldInfo // sorted. Used when enc/dec struct to map.
+ sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array.
+
+ rt reflect.Type
+ rtid uintptr
+
+ // baseId gives a pointer to the base reflect.Type, after dereferencing
+ // the pointers. E.g. base type of ***time.Time is time.Time.
+ base reflect.Type
+ baseId uintptr
+ baseIndir int8 // number of indirections to get to base
+
+ mbs bool // base type (T or *T) is a MapBySlice
+
+ m bool // base type (T or *T) is a binaryMarshaler
+ unm bool // base type (T or *T) is a binaryUnmarshaler
+ mIndir int8 // number of indirections to get to binaryMarshaler type
+ unmIndir int8 // number of indirections to get to binaryUnmarshaler type
+ toArray bool // whether this (struct) type should be encoded as an array
+}
+
+func (ti *typeInfo) indexForEncName(name string) int {
+ //tisfi := ti.sfi
+ const binarySearchThreshold = 16
+ if sfilen := len(ti.sfi); sfilen < binarySearchThreshold {
+ // linear search. faster than binary search in my testing up to 16-field structs.
+ for i, si := range ti.sfi {
+ if si.encName == name {
+ return i
+ }
+ }
+ } else {
+ // binary search. adapted from sort/search.go.
+ h, i, j := 0, 0, sfilen
+ for i < j {
+ h = i + (j-i)/2
+ if ti.sfi[h].encName < name {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ if i < sfilen && ti.sfi[i].encName == name {
+ return i
+ }
+ }
+ return -1
+}
+
+func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
+ var ok bool
+ cachedTypeInfoMutex.RLock()
+ pti, ok = cachedTypeInfo[rtid]
+ cachedTypeInfoMutex.RUnlock()
+ if ok {
+ return
+ }
+
+ cachedTypeInfoMutex.Lock()
+ defer cachedTypeInfoMutex.Unlock()
+ if pti, ok = cachedTypeInfo[rtid]; ok {
+ return
+ }
+
+ ti := typeInfo{rt: rt, rtid: rtid}
+ pti = &ti
+
+ var indir int8
+ if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok {
+ ti.m, ti.mIndir = true, indir
+ }
+ if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok {
+ ti.unm, ti.unmIndir = true, indir
+ }
+ if ok, _ = implementsIntf(rt, mapBySliceTyp); ok {
+ ti.mbs = true
+ }
+
+ pt := rt
+ var ptIndir int8
+ // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { }
+ for pt.Kind() == reflect.Ptr {
+ pt = pt.Elem()
+ ptIndir++
+ }
+ if ptIndir == 0 {
+ ti.base = rt
+ ti.baseId = rtid
+ } else {
+ ti.base = pt
+ ti.baseId = reflect.ValueOf(pt).Pointer()
+ ti.baseIndir = ptIndir
+ }
+
+ if rt.Kind() == reflect.Struct {
+ var siInfo *structFieldInfo
+ if f, ok := rt.FieldByName(structInfoFieldName); ok {
+ siInfo = parseStructFieldInfo(structInfoFieldName, f.Tag.Get(structTagName))
+ ti.toArray = siInfo.toArray
+ }
+ sfip := make([]*structFieldInfo, 0, rt.NumField())
+ rgetTypeInfo(rt, nil, make(map[string]bool), &sfip, siInfo)
+
+ // // try to put all si close together
+ // const tryToPutAllStructFieldInfoTogether = true
+ // if tryToPutAllStructFieldInfoTogether {
+ // sfip2 := make([]structFieldInfo, len(sfip))
+ // for i, si := range sfip {
+ // sfip2[i] = *si
+ // }
+ // for i := range sfip {
+ // sfip[i] = &sfip2[i]
+ // }
+ // }
+
+ ti.sfip = make([]*structFieldInfo, len(sfip))
+ ti.sfi = make([]*structFieldInfo, len(sfip))
+ copy(ti.sfip, sfip)
+ sort.Sort(sfiSortedByEncName(sfip))
+ copy(ti.sfi, sfip)
+ }
+ // sfi = sfip
+ cachedTypeInfo[rtid] = pti
+ return
+}
+
+func rgetTypeInfo(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool,
+ sfi *[]*structFieldInfo, siInfo *structFieldInfo,
+) {
+ // for rt.Kind() == reflect.Ptr {
+ // // indexstack = append(indexstack, 0)
+ // rt = rt.Elem()
+ // }
+ for j := 0; j < rt.NumField(); j++ {
+ f := rt.Field(j)
+ stag := f.Tag.Get(structTagName)
+ if stag == "-" {
+ continue
+ }
+ if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) {
+ continue
+ }
+ // if anonymous and there is no struct tag and it's a struct (or pointer to struct), inline it.
+ if f.Anonymous && stag == "" {
+ ft := f.Type
+ for ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+ if ft.Kind() == reflect.Struct {
+ indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
+ rgetTypeInfo(ft, indexstack2, fnameToHastag, sfi, siInfo)
+ continue
+ }
+ }
+ // do not let fields with the same name in embedded structs override a field at a higher level.
+ // this must be done after the anonymous check, so that anonymous fields
+ // can still include their child fields.
+ if _, ok := fnameToHastag[f.Name]; ok {
+ continue
+ }
+ si := parseStructFieldInfo(f.Name, stag)
+ // si.ikind = int(f.Type.Kind())
+ if len(indexstack) == 0 {
+ si.i = int16(j)
+ } else {
+ si.i = -1
+ si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
+ }
+
+ if siInfo != nil {
+ if siInfo.omitEmpty {
+ si.omitEmpty = true
+ }
+ }
+ *sfi = append(*sfi, si)
+ fnameToHastag[f.Name] = stag != ""
+ }
+}
+
+func panicToErr(err *error) {
+ if recoverPanicToErr {
+ if x := recover(); x != nil {
+ //debug.PrintStack()
+ panicValToErr(x, err)
+ }
+ }
+}
+
+func doPanic(tag string, format string, params ...interface{}) {
+ params2 := make([]interface{}, len(params)+1)
+ params2[0] = tag
+ copy(params2[1:], params)
+ panic(fmt.Errorf("%s: "+format, params2...))
+}
+
+func checkOverflowFloat32(f float64, doCheck bool) {
+ if !doCheck {
+ return
+ }
+ // check overflow (logic adapted from std pkg reflect/value.go OverflowFloat())
+ f2 := f
+ if f2 < 0 {
+ f2 = -f
+ }
+ if math.MaxFloat32 < f2 && f2 <= math.MaxFloat64 {
+ decErr("Overflow float32 value: %v", f2)
+ }
+}
+
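+// checkOverflow calls decErr if i or ui does not fit in bitsize bits.
+// For illustration (a sketch, not part of the original sources): with bitsize=8,
+// checkOverflow(0, 200, 8) fails because 200 does not fit in an int8, while
+// checkOverflow(200, 0, 8) passes because 200 fits in a uint8.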
+func checkOverflow(ui uint64, i int64, bitsize uint8) {
+ // check overflow (logic adapted from std pkg reflect/value.go OverflowUint())
+ if bitsize == 0 {
+ return
+ }
+ if i != 0 {
+ if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc {
+ decErr("Overflow int value: %v", i)
+ }
+ }
+ if ui != 0 {
+ if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc {
+ decErr("Overflow uint value: %v", ui)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go
new file mode 100644
index 000000000..58417da95
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go
@@ -0,0 +1,127 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+// All non-std package dependencies live in this file,
+// so porting to a different environment is easy (just update functions).
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+)
+
+var (
+ raisePanicAfterRecover = false
+ debugging = true
+)
+
+func panicValToErr(panicVal interface{}, err *error) {
+ switch xerr := panicVal.(type) {
+ case error:
+ *err = xerr
+ case string:
+ *err = errors.New(xerr)
+ default:
+ *err = fmt.Errorf("%v", panicVal)
+ }
+ if raisePanicAfterRecover {
+ panic(panicVal)
+ }
+ return
+}
+
+func isEmptyValueDeref(v reflect.Value, deref bool) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ if deref {
+ if v.IsNil() {
+ return true
+ }
+ return isEmptyValueDeref(v.Elem(), deref)
+ } else {
+ return v.IsNil()
+ }
+ case reflect.Struct:
+ // return true if all fields are empty. else return false.
+
+ // we cannot use equality check, because some fields may be maps/slices/etc
+ // and consequently the structs are not comparable.
+ // return v.Interface() == reflect.Zero(v.Type()).Interface()
+ for i, n := 0, v.NumField(); i < n; i++ {
+ if !isEmptyValueDeref(v.Field(i), deref) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ return isEmptyValueDeref(v, true)
+}
+
+func debugf(format string, args ...interface{}) {
+ if debugging {
+ if len(format) == 0 || format[len(format)-1] != '\n' {
+ format = format + "\n"
+ }
+ fmt.Printf(format, args...)
+ }
+}
+
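+// pruneSignExt returns the number of leading sign-extension bytes that can be
+// dropped from v without changing its value. For illustration (a sketch, not part
+// of the original sources): pruneSignExt([]byte{0, 0, 0, 5}, true) returns 3, since
+// the three leading zero bytes are redundant for the positive value 5.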
+func pruneSignExt(v []byte, pos bool) (n int) {
+ if len(v) < 2 {
+ } else if pos && v[0] == 0 {
+ for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ {
+ }
+ } else if !pos && v[0] == 0xff {
+ for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ {
+ }
+ }
+ return
+}
+
+func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) {
+ if typ == nil {
+ return
+ }
+ rt := typ
+ // The type might be a pointer and we need to keep
+ // dereferencing to the base type until we find an implementation.
+ for {
+ if rt.Implements(iTyp) {
+ return true, indir
+ }
+ if p := rt; p.Kind() == reflect.Ptr {
+ indir++
+ if indir >= math.MaxInt8 { // insane number of indirections
+ return false, 0
+ }
+ rt = p.Elem()
+ continue
+ }
+ break
+ }
+ // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy.
+ if typ.Kind() != reflect.Ptr {
+ // Not a pointer, but does the pointer work?
+ if reflect.PtrTo(typ).Implements(iTyp) {
+ return true, -1
+ }
+ }
+ return false, 0
+}
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go
new file mode 100644
index 000000000..da0500d19
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go
@@ -0,0 +1,816 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+/*
+MSGPACK
+
+The msgpack-c implementation powers the C, C++, Python, Ruby, etc. libraries.
+We need to maintain compatibility with it and how it encodes integer values
+without caring about the type.
+
+For compatibility with behaviour of msgpack-c reference implementation:
+ - Go intX (>0) and uintX
+ IS ENCODED AS
+ msgpack +ve fixnum, unsigned
+ - Go intX (<0)
+ IS ENCODED AS
+ msgpack -ve fixnum, signed
+
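+For example (an illustrative sketch of the table above): int64(5) is written as
+the positive fixnum 0x05, and int64(-5) as the negative fixnum 0xfb.
+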
+*/
+package codec
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "net/rpc"
+)
+
+const (
+ mpPosFixNumMin byte = 0x00
+ mpPosFixNumMax = 0x7f
+ mpFixMapMin = 0x80
+ mpFixMapMax = 0x8f
+ mpFixArrayMin = 0x90
+ mpFixArrayMax = 0x9f
+ mpFixStrMin = 0xa0
+ mpFixStrMax = 0xbf
+ mpNil = 0xc0
+ _ = 0xc1
+ mpFalse = 0xc2
+ mpTrue = 0xc3
+ mpFloat = 0xca
+ mpDouble = 0xcb
+ mpUint8 = 0xcc
+ mpUint16 = 0xcd
+ mpUint32 = 0xce
+ mpUint64 = 0xcf
+ mpInt8 = 0xd0
+ mpInt16 = 0xd1
+ mpInt32 = 0xd2
+ mpInt64 = 0xd3
+
+ // extensions below
+ mpBin8 = 0xc4
+ mpBin16 = 0xc5
+ mpBin32 = 0xc6
+ mpExt8 = 0xc7
+ mpExt16 = 0xc8
+ mpExt32 = 0xc9
+ mpFixExt1 = 0xd4
+ mpFixExt2 = 0xd5
+ mpFixExt4 = 0xd6
+ mpFixExt8 = 0xd7
+ mpFixExt16 = 0xd8
+
+ mpStr8 = 0xd9 // new
+ mpStr16 = 0xda
+ mpStr32 = 0xdb
+
+ mpArray16 = 0xdc
+ mpArray32 = 0xdd
+
+ mpMap16 = 0xde
+ mpMap32 = 0xdf
+
+ mpNegFixNumMin = 0xe0
+ mpNegFixNumMax = 0xff
+)
+
+// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec
+// that the backend RPC service takes multiple arguments, which have been arranged
+// in sequence in the slice.
+//
+// The Codec then passes it AS-IS to the rpc service (without wrapping it in an
+// array of 1 element).
+type MsgpackSpecRpcMultiArgs []interface{}
+
+// A MsgpackContainer type specifies the different types of msgpackContainers.
+type msgpackContainerType struct {
+ fixCutoff int
+ bFixMin, b8, b16, b32 byte
+ hasFixMin, has8, has8Always bool
+}
+
+var (
+ msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false}
+ msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true}
+ msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false}
+ msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false}
+)
+
+//---------------------------------------------
+
+type msgpackEncDriver struct {
+ w encWriter
+ h *MsgpackHandle
+}
+
+func (e *msgpackEncDriver) isBuiltinType(rt uintptr) bool {
+ //no builtin types. All encodings are based on kinds. Types supported as extensions.
+ return false
+}
+
+func (e *msgpackEncDriver) encodeBuiltin(rt uintptr, v interface{}) {}
+
+func (e *msgpackEncDriver) encodeNil() {
+ e.w.writen1(mpNil)
+}
+
+func (e *msgpackEncDriver) encodeInt(i int64) {
+
+ switch {
+ case i >= 0:
+ e.encodeUint(uint64(i))
+ case i >= -32:
+ e.w.writen1(byte(i))
+ case i >= math.MinInt8:
+ e.w.writen2(mpInt8, byte(i))
+ case i >= math.MinInt16:
+ e.w.writen1(mpInt16)
+ e.w.writeUint16(uint16(i))
+ case i >= math.MinInt32:
+ e.w.writen1(mpInt32)
+ e.w.writeUint32(uint32(i))
+ default:
+ e.w.writen1(mpInt64)
+ e.w.writeUint64(uint64(i))
+ }
+}
+
+func (e *msgpackEncDriver) encodeUint(i uint64) {
+ switch {
+ case i <= math.MaxInt8:
+ e.w.writen1(byte(i))
+ case i <= math.MaxUint8:
+ e.w.writen2(mpUint8, byte(i))
+ case i <= math.MaxUint16:
+ e.w.writen1(mpUint16)
+ e.w.writeUint16(uint16(i))
+ case i <= math.MaxUint32:
+ e.w.writen1(mpUint32)
+ e.w.writeUint32(uint32(i))
+ default:
+ e.w.writen1(mpUint64)
+ e.w.writeUint64(uint64(i))
+ }
+}
+
+func (e *msgpackEncDriver) encodeBool(b bool) {
+ if b {
+ e.w.writen1(mpTrue)
+ } else {
+ e.w.writen1(mpFalse)
+ }
+}
+
+func (e *msgpackEncDriver) encodeFloat32(f float32) {
+ e.w.writen1(mpFloat)
+ e.w.writeUint32(math.Float32bits(f))
+}
+
+func (e *msgpackEncDriver) encodeFloat64(f float64) {
+ e.w.writen1(mpDouble)
+ e.w.writeUint64(math.Float64bits(f))
+}
+
+func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) {
+ switch {
+ case l == 1:
+ e.w.writen2(mpFixExt1, xtag)
+ case l == 2:
+ e.w.writen2(mpFixExt2, xtag)
+ case l == 4:
+ e.w.writen2(mpFixExt4, xtag)
+ case l == 8:
+ e.w.writen2(mpFixExt8, xtag)
+ case l == 16:
+ e.w.writen2(mpFixExt16, xtag)
+ case l < 256:
+ e.w.writen2(mpExt8, byte(l))
+ e.w.writen1(xtag)
+ case l < 65536:
+ e.w.writen1(mpExt16)
+ e.w.writeUint16(uint16(l))
+ e.w.writen1(xtag)
+ default:
+ e.w.writen1(mpExt32)
+ e.w.writeUint32(uint32(l))
+ e.w.writen1(xtag)
+ }
+}
+
+func (e *msgpackEncDriver) encodeArrayPreamble(length int) {
+ e.writeContainerLen(msgpackContainerList, length)
+}
+
+func (e *msgpackEncDriver) encodeMapPreamble(length int) {
+ e.writeContainerLen(msgpackContainerMap, length)
+}
+
+func (e *msgpackEncDriver) encodeString(c charEncoding, s string) {
+ if c == c_RAW && e.h.WriteExt {
+ e.writeContainerLen(msgpackContainerBin, len(s))
+ } else {
+ e.writeContainerLen(msgpackContainerStr, len(s))
+ }
+ if len(s) > 0 {
+ e.w.writestr(s)
+ }
+}
+
+func (e *msgpackEncDriver) encodeSymbol(v string) {
+ e.encodeString(c_UTF8, v)
+}
+
+func (e *msgpackEncDriver) encodeStringBytes(c charEncoding, bs []byte) {
+ if c == c_RAW && e.h.WriteExt {
+ e.writeContainerLen(msgpackContainerBin, len(bs))
+ } else {
+ e.writeContainerLen(msgpackContainerStr, len(bs))
+ }
+ if len(bs) > 0 {
+ e.w.writeb(bs)
+ }
+}
+
+func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) {
+ switch {
+ case ct.hasFixMin && l < ct.fixCutoff:
+ e.w.writen1(ct.bFixMin | byte(l))
+ case ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt):
+ e.w.writen2(ct.b8, uint8(l))
+ case l < 65536:
+ e.w.writen1(ct.b16)
+ e.w.writeUint16(uint16(l))
+ default:
+ e.w.writen1(ct.b32)
+ e.w.writeUint32(uint32(l))
+ }
+}
+
+//---------------------------------------------
+
+type msgpackDecDriver struct {
+ r decReader
+ h *MsgpackHandle
+ bd byte
+ bdRead bool
+ bdType valueType
+}
+
+func (d *msgpackDecDriver) isBuiltinType(rt uintptr) bool {
+ //no builtin types. All encodings are based on kinds. Types supported as extensions.
+ return false
+}
+
+func (d *msgpackDecDriver) decodeBuiltin(rt uintptr, v interface{}) {}
+
+// Note: This returns either a primitive (int, bool, etc) for non-containers,
+// or a containerType, or a specific type denoting nil or extension.
+// It is called when a nil interface{} is passed, leaving it up to the DecDriver
+// to introspect the stream and decide how best to decode.
+// It deciphers the value by looking at the stream first.
+func (d *msgpackDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
+ d.initReadNext()
+ bd := d.bd
+
+ switch bd {
+ case mpNil:
+ vt = valueTypeNil
+ d.bdRead = false
+ case mpFalse:
+ vt = valueTypeBool
+ v = false
+ case mpTrue:
+ vt = valueTypeBool
+ v = true
+
+ case mpFloat:
+ vt = valueTypeFloat
+ v = float64(math.Float32frombits(d.r.readUint32()))
+ case mpDouble:
+ vt = valueTypeFloat
+ v = math.Float64frombits(d.r.readUint64())
+
+ case mpUint8:
+ vt = valueTypeUint
+ v = uint64(d.r.readn1())
+ case mpUint16:
+ vt = valueTypeUint
+ v = uint64(d.r.readUint16())
+ case mpUint32:
+ vt = valueTypeUint
+ v = uint64(d.r.readUint32())
+ case mpUint64:
+ vt = valueTypeUint
+ v = uint64(d.r.readUint64())
+
+ case mpInt8:
+ vt = valueTypeInt
+ v = int64(int8(d.r.readn1()))
+ case mpInt16:
+ vt = valueTypeInt
+ v = int64(int16(d.r.readUint16()))
+ case mpInt32:
+ vt = valueTypeInt
+ v = int64(int32(d.r.readUint32()))
+ case mpInt64:
+ vt = valueTypeInt
+ v = int64(int64(d.r.readUint64()))
+
+ default:
+ switch {
+ case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
+ // positive fixnum (always signed)
+ vt = valueTypeInt
+ v = int64(int8(bd))
+ case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
+ // negative fixnum
+ vt = valueTypeInt
+ v = int64(int8(bd))
+ case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
+ if d.h.RawToString {
+ var rvm string
+ vt = valueTypeString
+ v = &rvm
+ } else {
+ var rvm = []byte{}
+ vt = valueTypeBytes
+ v = &rvm
+ }
+ decodeFurther = true
+ case bd == mpBin8, bd == mpBin16, bd == mpBin32:
+ var rvm = []byte{}
+ vt = valueTypeBytes
+ v = &rvm
+ decodeFurther = true
+ case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
+ vt = valueTypeArray
+ decodeFurther = true
+ case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
+ vt = valueTypeMap
+ decodeFurther = true
+ case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
+ clen := d.readExtLen()
+ var re RawExt
+ re.Tag = d.r.readn1()
+ re.Data = d.r.readn(clen)
+ v = &re
+ vt = valueTypeExt
+ default:
+ decErr("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
+ }
+ }
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ return
+}
+
+// int can be decoded from msgpack type: intXXX or uintXXX
+func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) {
+ switch d.bd {
+ case mpUint8:
+ i = int64(uint64(d.r.readn1()))
+ case mpUint16:
+ i = int64(uint64(d.r.readUint16()))
+ case mpUint32:
+ i = int64(uint64(d.r.readUint32()))
+ case mpUint64:
+ i = int64(d.r.readUint64())
+ case mpInt8:
+ i = int64(int8(d.r.readn1()))
+ case mpInt16:
+ i = int64(int16(d.r.readUint16()))
+ case mpInt32:
+ i = int64(int32(d.r.readUint32()))
+ case mpInt64:
+ i = int64(d.r.readUint64())
+ default:
+ switch {
+ case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
+ i = int64(int8(d.bd))
+ case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
+ i = int64(int8(d.bd))
+ default:
+ decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
+ }
+ }
+ // check overflow (logic adapted from std pkg reflect/value.go OverflowUint())
+ if bitsize > 0 {
+ if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc {
+ decErr("Overflow int value: %v", i)
+ }
+ }
+ d.bdRead = false
+ return
+}
+
+// uint can be decoded from msgpack type: intXXX or uintXXX
+func (d *msgpackDecDriver) decodeUint(bitsize uint8) (ui uint64) {
+ switch d.bd {
+ case mpUint8:
+ ui = uint64(d.r.readn1())
+ case mpUint16:
+ ui = uint64(d.r.readUint16())
+ case mpUint32:
+ ui = uint64(d.r.readUint32())
+ case mpUint64:
+ ui = d.r.readUint64()
+ case mpInt8:
+ if i := int64(int8(d.r.readn1())); i >= 0 {
+ ui = uint64(i)
+ } else {
+ decErr("Assigning negative signed value: %v, to unsigned type", i)
+ }
+ case mpInt16:
+ if i := int64(int16(d.r.readUint16())); i >= 0 {
+ ui = uint64(i)
+ } else {
+ decErr("Assigning negative signed value: %v, to unsigned type", i)
+ }
+ case mpInt32:
+ if i := int64(int32(d.r.readUint32())); i >= 0 {
+ ui = uint64(i)
+ } else {
+ decErr("Assigning negative signed value: %v, to unsigned type", i)
+ }
+ case mpInt64:
+ if i := int64(d.r.readUint64()); i >= 0 {
+ ui = uint64(i)
+ } else {
+ decErr("Assigning negative signed value: %v, to unsigned type", i)
+ }
+ default:
+ switch {
+ case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
+ ui = uint64(d.bd)
+ case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
+ decErr("Assigning negative signed value: %v, to unsigned type", int(d.bd))
+ default:
+ decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
+ }
+ }
+ // check overflow (logic adapted from std pkg reflect/value.go OverflowUint())
+ if bitsize > 0 {
+ if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc {
+ decErr("Overflow uint value: %v", ui)
+ }
+ }
+ d.bdRead = false
+ return
+}
+
+// float can either be decoded from msgpack type: float, double or intX
+func (d *msgpackDecDriver) decodeFloat(chkOverflow32 bool) (f float64) {
+ switch d.bd {
+ case mpFloat:
+ f = float64(math.Float32frombits(d.r.readUint32()))
+ case mpDouble:
+ f = math.Float64frombits(d.r.readUint64())
+ default:
+ f = float64(d.decodeInt(0))
+ }
+ checkOverflowFloat32(f, chkOverflow32)
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool, fixnum 0 or 1.
+func (d *msgpackDecDriver) decodeBool() (b bool) {
+ switch d.bd {
+ case mpFalse, 0:
+ // b = false
+ case mpTrue, 1:
+ b = true
+ default:
+ decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *msgpackDecDriver) decodeString() (s string) {
+ clen := d.readContainerLen(msgpackContainerStr)
+ if clen > 0 {
+ s = string(d.r.readn(clen))
+ }
+ d.bdRead = false
+ return
+}
+
+// Callers must check if changed=true (to decide whether to replace the one they have)
+func (d *msgpackDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) {
+ // bytes can be decoded from msgpackContainerStr or msgpackContainerBin
+ var clen int
+ switch d.bd {
+ case mpBin8, mpBin16, mpBin32:
+ clen = d.readContainerLen(msgpackContainerBin)
+ default:
+ clen = d.readContainerLen(msgpackContainerStr)
+ }
+ // if clen < 0 {
+ // changed = true
+ // panic("length cannot be zero. this cannot be nil.")
+ // }
+ if clen > 0 {
+ // if no contents in stream, don't update the passed byteslice
+ if len(bs) != clen {
+ // Return changed=true if length of passed slice diff from length of bytes in stream
+ if len(bs) > clen {
+ bs = bs[:clen]
+ } else {
+ bs = make([]byte, clen)
+ }
+ bsOut = bs
+ changed = true
+ }
+ d.r.readb(bs)
+ }
+ d.bdRead = false
+ return
+}
+
+// Every top-level decode func (i.e. decodeValue, decode) must call this first.
+func (d *msgpackDecDriver) initReadNext() {
+ if d.bdRead {
+ return
+ }
+ d.bd = d.r.readn1()
+ d.bdRead = true
+ d.bdType = valueTypeUnset
+}
+
+func (d *msgpackDecDriver) currentEncodedType() valueType {
+ if d.bdType == valueTypeUnset {
+ bd := d.bd
+ switch bd {
+ case mpNil:
+ d.bdType = valueTypeNil
+ case mpFalse, mpTrue:
+ d.bdType = valueTypeBool
+ case mpFloat, mpDouble:
+ d.bdType = valueTypeFloat
+ case mpUint8, mpUint16, mpUint32, mpUint64:
+ d.bdType = valueTypeUint
+ case mpInt8, mpInt16, mpInt32, mpInt64:
+ d.bdType = valueTypeInt
+ default:
+ switch {
+ case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
+ d.bdType = valueTypeInt
+ case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
+ d.bdType = valueTypeInt
+ case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
+ if d.h.RawToString {
+ d.bdType = valueTypeString
+ } else {
+ d.bdType = valueTypeBytes
+ }
+ case bd == mpBin8, bd == mpBin16, bd == mpBin32:
+ d.bdType = valueTypeBytes
+ case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
+ d.bdType = valueTypeArray
+ case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
+ d.bdType = valueTypeMap
+ case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
+ d.bdType = valueTypeExt
+ default:
+ decErr("currentEncodedType: Undeciphered descriptor: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
+ }
+ }
+ }
+ return d.bdType
+}
+
+func (d *msgpackDecDriver) tryDecodeAsNil() bool {
+ if d.bd == mpNil {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) {
+ bd := d.bd
+ switch {
+ case bd == mpNil:
+ clen = -1 // to represent nil
+ case bd == ct.b8:
+ clen = int(d.r.readn1())
+ case bd == ct.b16:
+ clen = int(d.r.readUint16())
+ case bd == ct.b32:
+ clen = int(d.r.readUint32())
+ case (ct.bFixMin & bd) == ct.bFixMin:
+ clen = int(ct.bFixMin ^ bd)
+ default:
+ decErr("readContainerLen: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *msgpackDecDriver) readMapLen() int {
+ return d.readContainerLen(msgpackContainerMap)
+}
+
+func (d *msgpackDecDriver) readArrayLen() int {
+ return d.readContainerLen(msgpackContainerList)
+}
+
+func (d *msgpackDecDriver) readExtLen() (clen int) {
+ switch d.bd {
+ case mpNil:
+ clen = -1 // to represent nil
+ case mpFixExt1:
+ clen = 1
+ case mpFixExt2:
+ clen = 2
+ case mpFixExt4:
+ clen = 4
+ case mpFixExt8:
+ clen = 8
+ case mpFixExt16:
+ clen = 16
+ case mpExt8:
+ clen = int(d.r.readn1())
+ case mpExt16:
+ clen = int(d.r.readUint16())
+ case mpExt32:
+ clen = int(d.r.readUint32())
+ default:
+ decErr("decoding ext bytes: found unexpected byte: %x", d.bd)
+ }
+ return
+}
+
+func (d *msgpackDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+ xbd := d.bd
+ switch {
+ case xbd == mpBin8, xbd == mpBin16, xbd == mpBin32:
+ xbs, _ = d.decodeBytes(nil)
+ case xbd == mpStr8, xbd == mpStr16, xbd == mpStr32,
+ xbd >= mpFixStrMin && xbd <= mpFixStrMax:
+ xbs = []byte(d.decodeString())
+ default:
+ clen := d.readExtLen()
+ xtag = d.r.readn1()
+ if verifyTag && xtag != tag {
+ decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+ }
+ xbs = d.r.readn(clen)
+ }
+ d.bdRead = false
+ return
+}
+
+//--------------------------------------------------
+
+// MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
+type MsgpackHandle struct {
+ BasicHandle
+
+ // RawToString controls how raw bytes are decoded into a nil interface{}.
+ RawToString bool
+ // WriteExt controls whether configured extensions are encoded with their extension tags.
+ // It also controls whether other elements of the new spec are encoded (i.e. Str8).
+ //
+ // With WriteExt=false, configured extensions are serialized as raw bytes
+ // and Str8 is not encoded.
+ //
+ // A stream can still be decoded into a typed value, provided an appropriate value
+ // is supplied, but the type cannot be inferred from the stream. If no appropriate
+ // type is supplied (e.g. decoding into a nil interface{}), you get back
+ // a []byte or string based on the setting of RawToString.
+ WriteExt bool
+}
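+
+// A minimal usage sketch (not part of the original sources; w and v are
+// placeholders for an io.Writer and the value to encode):
+//
+//    var mh MsgpackHandle
+//    mh.WriteExt = true
+//    enc := NewEncoder(w, &mh)
+//    err := enc.Encode(v)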
+
+func (h *MsgpackHandle) newEncDriver(w encWriter) encDriver {
+ return &msgpackEncDriver{w: w, h: h}
+}
+
+func (h *MsgpackHandle) newDecDriver(r decReader) decDriver {
+ return &msgpackDecDriver{r: r, h: h}
+}
+
+func (h *MsgpackHandle) writeExt() bool {
+ return h.WriteExt
+}
+
+func (h *MsgpackHandle) getBasicHandle() *BasicHandle {
+ return &h.BasicHandle
+}
+
+//--------------------------------------------------
+
+type msgpackSpecRpcCodec struct {
+ rpcCodec
+}
+
+// /////////////// Spec RPC Codec ///////////////////
+func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
+ // WriteRequest can write to both a Go service and other services that do
+ // not abide by the 1-argument rule of a Go service.
+ // We discriminate based on whether the body is a MsgpackSpecRpcMultiArgs.
+ var bodyArr []interface{}
+ if m, ok := body.(MsgpackSpecRpcMultiArgs); ok {
+ bodyArr = ([]interface{})(m)
+ } else {
+ bodyArr = []interface{}{body}
+ }
+ r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr}
+ return c.write(r2, nil, false, true)
+}
+
+func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
+ var moe interface{}
+ if r.Error != "" {
+ moe = r.Error
+ }
+ if moe != nil && body != nil {
+ body = nil
+ }
+ r2 := []interface{}{1, uint32(r.Seq), moe, body}
+ return c.write(r2, nil, false, true)
+}
+
+func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error {
+ return c.parseCustomHeader(1, &r.Seq, &r.Error)
+}
+
+func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error {
+ return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod)
+}
+
+func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error {
+ if body == nil { // read and discard
+ return c.read(nil)
+ }
+ bodyArr := []interface{}{body}
+ return c.read(&bodyArr)
+}
+
+func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) {
+
+ if c.cls {
+ return io.EOF
+ }
+
+ // We read the response header by hand
+ // so that the body can be decoded on its own from the stream at a later time.
+
+ const fia byte = 0x94 //four item array descriptor value
+ // Not sure why the panic of EOF is swallowed above.
+ // if bs1 := c.dec.r.readn1(); bs1 != fia {
+ // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1)
+ // return
+ // }
+ var b byte
+ b, err = c.br.ReadByte()
+ if err != nil {
+ return
+ }
+ if b != fia {
+ err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b)
+ return
+ }
+
+ if err = c.read(&b); err != nil {
+ return
+ }
+ if b != expectTypeByte {
+ err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b)
+ return
+ }
+ if err = c.read(msgid); err != nil {
+ return
+ }
+ if err = c.read(methodOrError); err != nil {
+ return
+ }
+ return
+}
+
+//--------------------------------------------------
+
+// msgpackSpecRpc is the implementation of Rpc that uses the communication protocol
+// defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+type msgpackSpecRpc struct{}
+
+// MsgpackSpecRpc implements Rpc using the communication protocol defined in
+// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md .
+// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
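+//
+// A usage sketch (not part of the original sources; conn is an established
+// net.Conn and mh a configured *MsgpackHandle):
+//
+//    client := rpc.NewClientWithCodec(MsgpackSpecRpc.ClientCodec(conn, mh))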
+var MsgpackSpecRpc msgpackSpecRpc
+
+func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
+ return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
+}
+
+func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
+ return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
+}
+
+var _ decDriver = (*msgpackDecDriver)(nil)
+var _ encDriver = (*msgpackEncDriver)(nil)
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py
new file mode 100755
index 000000000..e933838c5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
+# This will create golden files in a directory passed to it.
+# A test calls this internally to create the golden files
+# so it can process them (so we don't have to check in the files).
+
+import msgpack, msgpackrpc, sys, os, threading
+
+def get_test_data_list():
+ # get list with all primitive types, and a combo type
+ l0 = [
+ -8,
+ -1616,
+ -32323232,
+ -6464646464646464,
+ 192,
+ 1616,
+ 32323232,
+ 6464646464646464,
+ 192,
+ -3232.0,
+ -6464646464.0,
+ 3232.0,
+ 6464646464.0,
+ False,
+ True,
+ None,
+ "someday",
+ "",
+ "bytestring",
+ 1328176922000002000,
+ -2206187877999998000,
+ 0,
+ -6795364578871345152
+ ]
+ l1 = [
+ { "true": True,
+ "false": False },
+ { "true": "True",
+ "false": False,
+ "uint16(1616)": 1616 },
+ { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
+ "int32":32323232, "bool": True,
+ "LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
+ "SHORT STRING": "1234567890" },
+ { True: "true", 8: False, "false": 0 }
+ ]
+
+ l = []
+ l.extend(l0)
+ l.append(l0)
+ l.extend(l1)
+ return l
+
+def build_test_data(destdir):
+ l = get_test_data_list()
+ for i in range(len(l)):
+ packer = msgpack.Packer()
+ serialized = packer.pack(l[i])
+ f = open(os.path.join(destdir, str(i) + '.golden'), 'wb')
+ f.write(serialized)
+ f.close()
+
+def doRpcServer(port, stopTimeSec):
+ class EchoHandler(object):
+ def Echo123(self, msg1, msg2, msg3):
+ return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
+ def EchoStruct(self, msg):
+ return ("%s" % msg)
+
+ addr = msgpackrpc.Address('localhost', port)
+ server = msgpackrpc.Server(EchoHandler())
+ server.listen(addr)
+ # run thread to stop it after stopTimeSec seconds if > 0
+ if stopTimeSec > 0:
+ def myStopRpcServer():
+ server.stop()
+ t = threading.Timer(stopTimeSec, myStopRpcServer)
+ t.start()
+ server.start()
+
+def doRpcClientToPythonSvc(port):
+ address = msgpackrpc.Address('localhost', port)
+ client = msgpackrpc.Client(address, unpack_encoding='utf-8')
+ print(client.call("Echo123", "A1", "B2", "C3"))
+ print(client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}))
+
+def doRpcClientToGoSvc(port):
+ # print ">>>> port: ", port, " <<<<<"
+ address = msgpackrpc.Address('localhost', port)
+ client = msgpackrpc.Client(address, unpack_encoding='utf-8')
+ print(client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]))
+ print(client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}))
+
+def doMain(args):
+ if len(args) == 2 and args[0] == "testdata":
+ build_test_data(args[1])
+ elif len(args) == 3 and args[0] == "rpc-server":
+ doRpcServer(int(args[1]), int(args[2]))
+ elif len(args) == 2 and args[0] == "rpc-client-python-service":
+ doRpcClientToPythonSvc(int(args[1]))
+ elif len(args) == 2 and args[0] == "rpc-client-go-service":
+ doRpcClientToGoSvc(int(args[1]))
+ else:
+ print("Usage: msgpack_test.py " +
+ "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
+
+if __name__ == "__main__":
+ doMain(sys.argv[1:])
+
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go b/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go
new file mode 100644
index 000000000..d014dbdcc
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+import (
+ "bufio"
+ "io"
+ "net/rpc"
+ "sync"
+)
+
+// Rpc provides a rpc Server or Client Codec for rpc communication.
+type Rpc interface {
+ ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
+ ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
+}
+
+// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
+// used by the rpc connection. It accommodates use-cases where the connection
+// should be used by rpc and non-rpc functions, e.g. streaming a file after
+// sending an rpc response.
+type RpcCodecBuffered interface {
+ BufferedReader() *bufio.Reader
+ BufferedWriter() *bufio.Writer
+}
+
+// -------------------------------------
+
+// rpcCodec defines the struct members and common methods.
+type rpcCodec struct {
+ rwc io.ReadWriteCloser
+ dec *Decoder
+ enc *Encoder
+ bw *bufio.Writer
+ br *bufio.Reader
+ mu sync.Mutex
+ cls bool
+}
+
+func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
+ bw := bufio.NewWriter(conn)
+ br := bufio.NewReader(conn)
+ return rpcCodec{
+ rwc: conn,
+ bw: bw,
+ br: br,
+ enc: NewEncoder(bw, h),
+ dec: NewDecoder(br, h),
+ }
+}
+
+func (c *rpcCodec) BufferedReader() *bufio.Reader {
+ return c.br
+}
+
+func (c *rpcCodec) BufferedWriter() *bufio.Writer {
+ return c.bw
+}
+
+func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) {
+ if c.cls {
+ return io.EOF
+ }
+ if err = c.enc.Encode(obj1); err != nil {
+ return
+ }
+ if writeObj2 {
+ if err = c.enc.Encode(obj2); err != nil {
+ return
+ }
+ }
+ if doFlush && c.bw != nil {
+ return c.bw.Flush()
+ }
+ return
+}
+
+func (c *rpcCodec) read(obj interface{}) (err error) {
+ if c.cls {
+ return io.EOF
+ }
+ // If nil is passed in, we still attempt to read the content and discard it.
+ if obj == nil {
+ var obj2 interface{}
+ return c.dec.Decode(&obj2)
+ }
+ return c.dec.Decode(obj)
+}
+
+func (c *rpcCodec) Close() error {
+ if c.cls {
+ return io.EOF
+ }
+ c.cls = true
+ return c.rwc.Close()
+}
+
+func (c *rpcCodec) ReadResponseBody(body interface{}) error {
+ return c.read(body)
+}
+
+// -------------------------------------
+
+type goRpcCodec struct {
+ rpcCodec
+}
+
+func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
+ // Must protect for concurrent access as per API
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.write(r, body, true, true)
+}
+
+func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.write(r, body, true, true)
+}
+
+func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
+ return c.read(r)
+}
+
+func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
+ return c.read(r)
+}
+
+func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
+ return c.read(body)
+}
+
+// -------------------------------------
+
+// goRpc is the implementation of Rpc that uses the communication protocol
+// as defined in net/rpc package.
+type goRpc struct{}
+
+// GoRpc implements Rpc using the communication protocol defined in net/rpc package.
+// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
+var GoRpc goRpc
+
+func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
+ return &goRpcCodec{newRPCCodec(conn, h)}
+}
+
+func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
+ return &goRpcCodec{newRPCCodec(conn, h)}
+}
+
+var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/simple.go b/vendor/github.com/hashicorp/go-msgpack/codec/simple.go
new file mode 100644
index 000000000..9e4d148a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/simple.go
@@ -0,0 +1,461 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+import "math"
+
+const (
+ _ uint8 = iota
+ simpleVdNil = 1
+ simpleVdFalse = 2
+ simpleVdTrue = 3
+ simpleVdFloat32 = 4
+ simpleVdFloat64 = 5
+
+ // each lasts for 4 (ie n, n+1, n+2, n+3)
+ simpleVdPosInt = 8
+ simpleVdNegInt = 12
+
+ // containers: each type reserves a block of 8 descriptors (ie n, n+1, ... n+7); only n..n+4 are used
+ simpleVdString = 216
+ simpleVdByteArray = 224
+ simpleVdArray = 232
+ simpleVdMap = 240
+ simpleVdExt = 248
+)
+
+type simpleEncDriver struct {
+ h *SimpleHandle
+ w encWriter
+ //b [8]byte
+}
+
+func (e *simpleEncDriver) isBuiltinType(rt uintptr) bool {
+ return false
+}
+
+func (e *simpleEncDriver) encodeBuiltin(rt uintptr, v interface{}) {
+}
+
+func (e *simpleEncDriver) encodeNil() {
+ e.w.writen1(simpleVdNil)
+}
+
+func (e *simpleEncDriver) encodeBool(b bool) {
+ if b {
+ e.w.writen1(simpleVdTrue)
+ } else {
+ e.w.writen1(simpleVdFalse)
+ }
+}
+
+func (e *simpleEncDriver) encodeFloat32(f float32) {
+ e.w.writen1(simpleVdFloat32)
+ e.w.writeUint32(math.Float32bits(f))
+}
+
+func (e *simpleEncDriver) encodeFloat64(f float64) {
+ e.w.writen1(simpleVdFloat64)
+ e.w.writeUint64(math.Float64bits(f))
+}
+
+func (e *simpleEncDriver) encodeInt(v int64) {
+ if v < 0 {
+ e.encUint(uint64(-v), simpleVdNegInt)
+ } else {
+ e.encUint(uint64(v), simpleVdPosInt)
+ }
+}
+
+func (e *simpleEncDriver) encodeUint(v uint64) {
+ e.encUint(v, simpleVdPosInt)
+}
+
+func (e *simpleEncDriver) encUint(v uint64, bd uint8) {
+ switch {
+ case v <= math.MaxUint8:
+ e.w.writen2(bd, uint8(v))
+ case v <= math.MaxUint16:
+ e.w.writen1(bd + 1)
+ e.w.writeUint16(uint16(v))
+ case v <= math.MaxUint32:
+ e.w.writen1(bd + 2)
+ e.w.writeUint32(uint32(v))
+ case v <= math.MaxUint64:
+ e.w.writen1(bd + 3)
+ e.w.writeUint64(v)
+ }
+}
+
+func (e *simpleEncDriver) encLen(bd byte, length int) {
+ switch {
+ case length == 0:
+ e.w.writen1(bd)
+ case length <= math.MaxUint8:
+ e.w.writen1(bd + 1)
+ e.w.writen1(uint8(length))
+ case length <= math.MaxUint16:
+ e.w.writen1(bd + 2)
+ e.w.writeUint16(uint16(length))
+ case int64(length) <= math.MaxUint32:
+ e.w.writen1(bd + 3)
+ e.w.writeUint32(uint32(length))
+ default:
+ e.w.writen1(bd + 4)
+ e.w.writeUint64(uint64(length))
+ }
+}
+
+func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
+ e.encLen(simpleVdExt, length)
+ e.w.writen1(xtag)
+}
+
+func (e *simpleEncDriver) encodeArrayPreamble(length int) {
+ e.encLen(simpleVdArray, length)
+}
+
+func (e *simpleEncDriver) encodeMapPreamble(length int) {
+ e.encLen(simpleVdMap, length)
+}
+
+func (e *simpleEncDriver) encodeString(c charEncoding, v string) {
+ e.encLen(simpleVdString, len(v))
+ e.w.writestr(v)
+}
+
+func (e *simpleEncDriver) encodeSymbol(v string) {
+ e.encodeString(c_UTF8, v)
+}
+
+func (e *simpleEncDriver) encodeStringBytes(c charEncoding, v []byte) {
+ e.encLen(simpleVdByteArray, len(v))
+ e.w.writeb(v)
+}
+
+//------------------------------------
+
+type simpleDecDriver struct {
+ h *SimpleHandle
+ r decReader
+ bdRead bool
+ bdType valueType
+ bd byte
+ //b [8]byte
+}
+
+func (d *simpleDecDriver) initReadNext() {
+ if d.bdRead {
+ return
+ }
+ d.bd = d.r.readn1()
+ d.bdRead = true
+ d.bdType = valueTypeUnset
+}
+
+func (d *simpleDecDriver) currentEncodedType() valueType {
+ if d.bdType == valueTypeUnset {
+ switch d.bd {
+ case simpleVdNil:
+ d.bdType = valueTypeNil
+ case simpleVdTrue, simpleVdFalse:
+ d.bdType = valueTypeBool
+ case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
+ d.bdType = valueTypeUint
+ case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
+ d.bdType = valueTypeInt
+ case simpleVdFloat32, simpleVdFloat64:
+ d.bdType = valueTypeFloat
+ case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
+ d.bdType = valueTypeString
+ case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
+ d.bdType = valueTypeBytes
+ case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
+ d.bdType = valueTypeExt
+ case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
+ d.bdType = valueTypeArray
+ case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
+ d.bdType = valueTypeMap
+ default:
+ decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.bd)
+ }
+ }
+ return d.bdType
+}
+
+func (d *simpleDecDriver) tryDecodeAsNil() bool {
+ if d.bd == simpleVdNil {
+ d.bdRead = false
+ return true
+ }
+ return false
+}
+
+func (d *simpleDecDriver) isBuiltinType(rt uintptr) bool {
+ return false
+}
+
+func (d *simpleDecDriver) decodeBuiltin(rt uintptr, v interface{}) {
+}
+
+func (d *simpleDecDriver) decIntAny() (ui uint64, i int64, neg bool) {
+ switch d.bd {
+ case simpleVdPosInt:
+ ui = uint64(d.r.readn1())
+ i = int64(ui)
+ case simpleVdPosInt + 1:
+ ui = uint64(d.r.readUint16())
+ i = int64(ui)
+ case simpleVdPosInt + 2:
+ ui = uint64(d.r.readUint32())
+ i = int64(ui)
+ case simpleVdPosInt + 3:
+ ui = uint64(d.r.readUint64())
+ i = int64(ui)
+ case simpleVdNegInt:
+ ui = uint64(d.r.readn1())
+ i = -(int64(ui))
+ neg = true
+ case simpleVdNegInt + 1:
+ ui = uint64(d.r.readUint16())
+ i = -(int64(ui))
+ neg = true
+ case simpleVdNegInt + 2:
+ ui = uint64(d.r.readUint32())
+ i = -(int64(ui))
+ neg = true
+ case simpleVdNegInt + 3:
+ ui = uint64(d.r.readUint64())
+ i = -(int64(ui))
+ neg = true
+ default:
+ decErr("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
+ }
+ // don't do this check, because callers may only want the unsigned value.
+ // if ui > math.MaxInt64 {
+ // decErr("decIntAny: Integer out of range for signed int64: %v", ui)
+ // }
+ return
+}
+
+func (d *simpleDecDriver) decodeInt(bitsize uint8) (i int64) {
+ _, i, _ = d.decIntAny()
+ checkOverflow(0, i, bitsize)
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) decodeUint(bitsize uint8) (ui uint64) {
+ ui, i, neg := d.decIntAny()
+ if neg {
+ decErr("Assigning negative signed value: %v, to unsigned type", i)
+ }
+ checkOverflow(ui, 0, bitsize)
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) decodeFloat(chkOverflow32 bool) (f float64) {
+ switch d.bd {
+ case simpleVdFloat32:
+ f = float64(math.Float32frombits(d.r.readUint32()))
+ case simpleVdFloat64:
+ f = math.Float64frombits(d.r.readUint64())
+ default:
+ if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 {
+ _, i, _ := d.decIntAny()
+ f = float64(i)
+ } else {
+ decErr("Float only valid from float32/64: Invalid descriptor: %v", d.bd)
+ }
+ }
+ checkOverflowFloat32(f, chkOverflow32)
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *simpleDecDriver) decodeBool() (b bool) {
+ switch d.bd {
+ case simpleVdTrue:
+ b = true
+ case simpleVdFalse:
+ default:
+ decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) readMapLen() (length int) {
+ d.bdRead = false
+ return d.decLen()
+}
+
+func (d *simpleDecDriver) readArrayLen() (length int) {
+ d.bdRead = false
+ return d.decLen()
+}
+
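+// decLen reads a container length. The low 3 bits of the descriptor select the
+// width: bd%8 == 0 means zero length, and 1, 2, 3, 4 mean the length follows in
+// 1, 2, 4 or 8 bytes respectively.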
+func (d *simpleDecDriver) decLen() int {
+ switch d.bd % 8 {
+ case 0:
+ return 0
+ case 1:
+ return int(d.r.readn1())
+ case 2:
+ return int(d.r.readUint16())
+ case 3:
+ ui := uint64(d.r.readUint32())
+ checkOverflow(ui, 0, intBitsize)
+ return int(ui)
+ case 4:
+ ui := d.r.readUint64()
+ checkOverflow(ui, 0, intBitsize)
+ return int(ui)
+ }
+ decErr("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8)
+ return -1
+}
+
+func (d *simpleDecDriver) decodeString() (s string) {
+ s = string(d.r.readn(d.decLen()))
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) {
+ if clen := d.decLen(); clen > 0 {
+ // if no contents in stream, don't update the passed byteslice
+ if len(bs) != clen {
+ if len(bs) > clen {
+ bs = bs[:clen]
+ } else {
+ bs = make([]byte, clen)
+ }
+ bsOut = bs
+ changed = true
+ }
+ d.r.readb(bs)
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
+ switch d.bd {
+ case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
+ l := d.decLen()
+ xtag = d.r.readn1()
+ if verifyTag && xtag != tag {
+ decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
+ }
+ xbs = d.r.readn(l)
+ case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
+ xbs, _ = d.decodeBytes(nil)
+ default:
+ decErr("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.bd)
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
+ d.initReadNext()
+
+ switch d.bd {
+ case simpleVdNil:
+ vt = valueTypeNil
+ case simpleVdFalse:
+ vt = valueTypeBool
+ v = false
+ case simpleVdTrue:
+ vt = valueTypeBool
+ v = true
+ case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
+ vt = valueTypeUint
+ ui, _, _ := d.decIntAny()
+ v = ui
+ case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
+ vt = valueTypeInt
+ _, i, _ := d.decIntAny()
+ v = i
+ case simpleVdFloat32:
+ vt = valueTypeFloat
+ v = d.decodeFloat(true)
+ case simpleVdFloat64:
+ vt = valueTypeFloat
+ v = d.decodeFloat(false)
+ case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
+ vt = valueTypeString
+ v = d.decodeString()
+ case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
+ vt = valueTypeBytes
+ v, _ = d.decodeBytes(nil)
+ case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
+ vt = valueTypeExt
+ l := d.decLen()
+ var re RawExt
+ re.Tag = d.r.readn1()
+ re.Data = d.r.readn(l)
+ v = &re
+ vt = valueTypeExt
+ case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
+ vt = valueTypeArray
+ decodeFurther = true
+ case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
+ vt = valueTypeMap
+ decodeFurther = true
+ default:
+ decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.bd)
+ }
+
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ return
+}
+
+//------------------------------------
+
+// SimpleHandle is a Handle for a very simple encoding format.
+//
+// simple is a simplistic codec similar to binc, but not as compact.
+// - Encoding of a value is always preceded by the descriptor byte (bd)
+// - True, false, nil are encoded fully in 1 byte (the descriptor)
+// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
+// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
+// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
+// - Lengths of containers (strings, bytes, array, map, extensions)
+//   are encoded in 0, 1, 2, 4 or 8 bytes.
+//   Zero-length containers have no length encoded.
+//   For others, the number of length bytes is given by pow(2, (bd%8)-1)
+// - maps are encoded as [bd] [length] [[key][value]]...
+// - arrays are encoded as [bd] [length] [value]...
+// - extensions are encoded as [bd] [length] [tag] [byte]...
+// - strings/bytearrays are encoded as [bd] [length] [byte]...
+//
+// The full spec will be published soon.
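+//
+// For example (an illustrative sketch of the rules above): true encodes as the
+// single descriptor byte 0x03 (simpleVdTrue), and the string "ab" encodes as
+// [0xd9, 0x02, 'a', 'b'] (simpleVdString+1, a 1-byte length, then the bytes).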
+type SimpleHandle struct {
+ BasicHandle
+}
+
+func (h *SimpleHandle) newEncDriver(w encWriter) encDriver {
+ return &simpleEncDriver{w: w, h: h}
+}
+
+func (h *SimpleHandle) newDecDriver(r decReader) decDriver {
+ return &simpleDecDriver{r: r, h: h}
+}
+
+func (_ *SimpleHandle) writeExt() bool {
+ return true
+}
+
+func (h *SimpleHandle) getBasicHandle() *BasicHandle {
+ return &h.BasicHandle
+}
+
+var _ decDriver = (*simpleDecDriver)(nil)
+var _ encDriver = (*simpleEncDriver)(nil)
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/time.go b/vendor/github.com/hashicorp/go-msgpack/codec/time.go
new file mode 100644
index 000000000..c86d65328
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/time.go
@@ -0,0 +1,193 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+import (
+ "time"
+)
+
+var (
+ timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+)
+
+// EncodeTime encodes a time.Time as a []byte, including
+// information on the instant in time and UTC offset.
+//
+// Format Description
+//
+// A timestamp is composed of 3 components:
+//
+// - secs: signed integer representing seconds since unix epoch
+//  - nsecs: unsigned integer representing fractional seconds as a
+//    nanosecond offset within secs, in the range 0 <= nsecs < 1e9
+//  - tz: signed integer representing timezone offset in minutes east of UTC,
+//    and a dst (daylight saving time) flag
+//
+// When encoding a timestamp, the first byte is the descriptor, which
+// defines which components are encoded and how many bytes are used to
+// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
+// is not encoded in the byte array explicitly*.
+//
+// Descriptor 8 bits are of the form `A B C DDD EE`:
+// A: Is secs component encoded? 1 = true
+// B: Is nsecs component encoded? 1 = true
+// C: Is tz component encoded? 1 = true
+// DDD: Number of extra bytes for secs (range 0-7).
+// If A = 1, secs encoded in DDD+1 bytes.
+// If A = 0, secs is not encoded, and is assumed to be 0.
+// If A = 1, then we need at least 1 byte to encode secs.
+// DDD says the number of extra bytes beyond that 1.
+// E.g. if DDD=0, then secs is represented in 1 byte.
+// if DDD=2, then secs is represented in 3 bytes.
+// EE: Number of extra bytes for nsecs (range 0-3).
+// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
+//
+// Following the descriptor bytes, subsequent bytes are:
+//
+// secs component encoded in `DDD + 1` bytes (if A == 1)
+// nsecs component encoded in `EE + 1` bytes (if B == 1)
+// tz component encoded in 2 bytes (if C == 1)
+//
+// secs and nsecs components are integers encoded in a big-endian
+// two's-complement encoding format.
+//
+// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
+// Least significant bit 0 are described below:
+//
+// Timezone offset has a range of -12:00 to +14:00 (i.e. -720 to +840 minutes).
+// Bit 15 = have_dst: set to 1 if we set the dst flag.
+// Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
+// Bits 13..0 = timezone offset in minutes. It is a signed integer in big-endian format.
+//
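+// For example (a minimal sketch): a time of exactly the Unix epoch in UTC has
+// secs=0, nsecs=0 and no timezone component, so encodeTime(time.Unix(0, 0).UTC())
+// returns just the descriptor byte []byte{0x00}, and decodeTime of that single
+// byte yields the same instant back.
+//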
+func encodeTime(t time.Time) []byte {
+ //t := rv.Interface().(time.Time)
+ tsecs, tnsecs := t.Unix(), t.Nanosecond()
+ var (
+ bd byte
+ btmp [8]byte
+ bs [16]byte
+ i int = 1
+ )
+ l := t.Location()
+ if l == time.UTC {
+ l = nil
+ }
+ if tsecs != 0 {
+ bd = bd | 0x80
+ bigen.PutUint64(btmp[:], uint64(tsecs))
+ f := pruneSignExt(btmp[:], tsecs >= 0)
+ bd = bd | (byte(7-f) << 2)
+ copy(bs[i:], btmp[f:])
+ i = i + (8 - f)
+ }
+ if tnsecs != 0 {
+ bd = bd | 0x40
+ bigen.PutUint32(btmp[:4], uint32(tnsecs))
+ f := pruneSignExt(btmp[:4], true)
+ bd = bd | byte(3-f)
+ copy(bs[i:], btmp[f:4])
+ i = i + (4 - f)
+ }
+ if l != nil {
+ bd = bd | 0x20
+ // Note that the Go standard library does not expose the dst flag.
+ _, zoneOffset := t.Zone()
+ //zoneName, zoneOffset := t.Zone()
+ zoneOffset /= 60
+ z := uint16(zoneOffset)
+ bigen.PutUint16(btmp[:2], z)
+ // clear dst flags
+ bs[i] = btmp[0] & 0x3f
+ bs[i+1] = btmp[1]
+ i = i + 2
+ }
+ bs[0] = bd
+ return bs[0:i]
+}
+
+// DecodeTime decodes a []byte into a time.Time.
+func decodeTime(bs []byte) (tt time.Time, err error) {
+ bd := bs[0]
+ var (
+ tsec int64
+ tnsec uint32
+ tz uint16
+ i byte = 1
+ i2 byte
+ n byte
+ )
+ if bd&(1<<7) != 0 {
+ var btmp [8]byte
+ n = ((bd >> 2) & 0x7) + 1
+ i2 = i + n
+ copy(btmp[8-n:], bs[i:i2])
+ //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
+ if bs[i]&(1<<7) != 0 {
+ copy(btmp[0:8-n], bsAll0xff)
+ //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff }
+ }
+ i = i2
+ tsec = int64(bigen.Uint64(btmp[:]))
+ }
+ if bd&(1<<6) != 0 {
+ var btmp [4]byte
+ n = (bd & 0x3) + 1
+ i2 = i + n
+ copy(btmp[4-n:], bs[i:i2])
+ i = i2
+ tnsec = bigen.Uint32(btmp[:])
+ }
+ if bd&(1<<5) == 0 {
+ tt = time.Unix(tsec, int64(tnsec)).UTC()
+ return
+ }
+ // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as the zone name.
+ // However, we need a name here, so it can be shown when the time is printed.
+ // Zone name is in the form: UTC-08:00.
+ // Note that the Go standard library does not expose the dst flag, so we ignore the dst bits.
+
+ i2 = i + 2
+ tz = bigen.Uint16(bs[i:i2])
+ i = i2
+ // sign extend sign bit into top 2 MSB (which were dst bits):
+ if tz&(1<<13) == 0 { // positive
+ tz = tz & 0x3fff //clear 2 MSBs: dst bits
+ } else { // negative
+ tz = tz | 0xc000 //set 2 MSBs: dst bits
+ //tzname[3] = '-' (TODO: verify. this works here)
+ }
+ tzint := int16(tz)
+ if tzint == 0 {
+ tt = time.Unix(tsec, int64(tnsec)).UTC()
+ } else {
+ // For Go Time, do not use a descriptive timezone.
+ // It's unnecessary, and makes it harder to do a reflect.DeepEqual.
+ // The Offset already tells what the offset should be, if not on UTC and unknown zone name.
+ // var zoneName = timeLocUTCName(tzint)
+ tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
+ }
+ return
+}
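+
+// Editorial usage sketch (hedged; assumes only the two helpers above): a
+// round trip through encodeTime and decodeTime should preserve the instant,
+// the nanosecond component, and the fixed offset, while the zone name is
+// deliberately left empty:
+//
+//   t := time.Date(2016, 8, 1, 12, 0, 0, 500, time.FixedZone("", -8*60*60))
+//   t2, err := decodeTime(encodeTime(t))
+//   // expect err == nil and t2.Equal(t), with t2 in time.FixedZone("", -8*60*60)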
+
+func timeLocUTCName(tzint int16) string {
+ if tzint == 0 {
+ return "UTC"
+ }
+ var tzname = []byte("UTC+00:00")
+ //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below.
+ //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
+ var tzhr, tzmin int16
+ if tzint < 0 {
+ tzname[3] = '-' // (TODO: verify. this works here)
+ tzhr, tzmin = -tzint/60, (-tzint)%60
+ } else {
+ tzhr, tzmin = tzint/60, tzint%60
+ }
+ tzname[4] = timeDigits[tzhr/10]
+ tzname[5] = timeDigits[tzhr%10]
+ tzname[7] = timeDigits[tzmin/10]
+ tzname[8] = timeDigits[tzmin%10]
+ return string(tzname)
+ //return time.FixedZone(string(tzname), int(tzint)*60)
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE
new file mode 100644
index 000000000..be2cc4dfb
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/nomad/CHANGELOG.md b/vendor/github.com/hashicorp/nomad/CHANGELOG.md
new file mode 100644
index 000000000..d6e70d9e8
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/CHANGELOG.md
@@ -0,0 +1,468 @@
+## 0.4.1
+
+__BACKWARDS INCOMPATIBILITIES:__
+ * telemetry: Operators will have to explicitly opt-in for Nomad client to
+ publish allocation and node metrics
+
+IMPROVEMENTS:
+ * core: Allow count 0 on system jobs [GH-1421]
+ * core: Summarize the current status of registered jobs. [GH-1383, GH-1517]
+ * core: Gracefully handle short lived outages by holding RPC calls [GH-1403]
+ * core: Introduce a lost state for allocations that were on Nodes that died
+ [GH-1516]
+ * api: client Logs endpoint for streaming task logs [GH-1444]
+ * api/cli: Support for tailing/streaming files [GH-1404, GH-1420]
+ * api/server: Support for querying job summaries [GH-1455]
+ * cli: `nomad logs` command for streaming task logs [GH-1444]
+ * cli: `nomad status` shows the create time of allocations [GH-1540]
+ * cli: `nomad plan` exit code indicates if changes will occur [GH-1502]
+ * cli: status commands support JSON output and go template formatting [GH-1503]
+ * cli: Validate and plan command supports reading from stdin [GH-1460,
+ GH-1458]
+ * cli: Allow basic authentication through address and environment variable
+ [GH-1610]
+ * cli: `nomad node-status` shows volume name for non-physical volumes instead
+ of showing 0B used [GH-1538]
+ * cli: Support retrieving job files using go-getter in the `run`, `plan` and
+ `validate` command [GH-1511]
+ * client: Add killing event to task state [GH-1457]
+ * client: Fingerprint network speed on Windows [GH-1443]
+ * discovery: Support for initial check status [GH-1599]
+ * discovery: Support for query params in health check urls [GH-1562]
+ * driver/docker: Allow working directory to be configured [GH-1513]
+ * driver/docker: Remove docker volumes when removing container [GH-1519]
+ * driver/docker: Set windows containers network mode to nat by default
+ [GH-1521]
+ * driver/exec: Allow chroot environment to be configurable [GH-1518]
+ * driver/qemu: Allows users to pass extra args to the qemu driver [GH-1596]
+ * telemetry: Circonus integration for telemetry metrics [GH-1459]
+ * telemetry: Allow operators to opt-in for publishing metrics [GH-1501]
+
+BUG FIXES:
+ * agent: Reload agent configuration on SIGHUP [GH-1566]
+ * core: Sanitize empty slices/maps in jobs to avoid incorrect create/destroy
+ updates [GH-1434]
+ * core: Fix race in which a Node registers and doesn't receive system jobs
+ [GH-1456]
+ * core: Fix issue in which Nodes with a large number of reserved ports would
+ cause dynamic port allocations to fail [GH-1526]
+ * core: Fix a condition in which old batch allocations could get updated even
+ after terminal. In a rare case this could cause a server panic [GH-1471]
+ * core: Do not update the Job attached to Allocations that have been marked
+ terminal [GH-1508]
+ * agent: Fix advertise address when using IPv6 [GH-1465]
+ * cli: Fix node-status when using IPv6 advertise address [GH-1465]
+ * client: Task start errors adhere to restart policy mode [GH-1405]
+ * client: Reregister with servers if node is unregistered [GH-1593]
+ * client: Killing an allocation doesn't cause allocation stats to block
+ [GH-1454]
+ * driver/docker: Disable swap on docker driver [GH-1480]
+ * driver/docker: Fix improper gating on privileged mode [GH-1506]
+ * driver/docker: Default network type is "nat" on Windows [GH-1521]
+ * driver/docker: Cleanup created volume when destroying container [GH-1519]
+ * driver/rkt: Set host environment variables [GH-1581]
+ * driver/rkt: Validate the command and trust_prefix configs [GH-1493]
+ * plan: Plan on system jobs discounts nodes that do not meet required
+ constraints [GH-1568]
+
+## 0.4.0
+
+__BACKWARDS INCOMPATIBILITIES:__
+ * api: Tasks are no longer allowed to have slashes in their name [GH-1210]
+ * cli: Remove the eval-monitor command. Users should switch to `nomad
+ eval-status -monitor`.
+ * config: Consul configuration has been moved from client options map to
+ consul block under client configuration
+ * driver/docker: Enabled SSL by default for pulling images from docker
+ registries. [GH-1336]
+
+IMPROVEMENTS:
+ * core: Scheduler reuses blocked evaluations to avoid unbounded creation of
+ evaluations under high contention [GH-1199]
+ * core: Scheduler stores placement failures in evaluations, no longer
+ generating failed allocations for debug information [GH-1188]
+ * api: Faster JSON response encoding [GH-1182]
+ * api: Gzip compress HTTP API requests [GH-1203]
+ * api: Plan api introduced for the Job endpoint [GH-1168]
+ * api: Job endpoint can enforce Job Modify Index to ensure job is being
+ modified from a known state [GH-1243]
+ * api/client: Add resource usage APIs for retrieving tasks/allocations/host
+ resource usage [GH-1189]
+ * cli: Faster when displaying large amounts of output [GH-1362]
+ * cli: Deprecate `eval-monitor` and introduce `eval-status` [GH-1206]
+ * cli: Unify the `fs` family of commands to be a single command [GH-1150]
+ * cli: Introduce `nomad plan` to dry-run a job through the scheduler and
+ determine its effects [GH-1181]
+ * cli: node-status command displays host resource usage and allocation
+ resources [GH-1261]
+ * cli: Region flag and environment variable introduced to set region
+ forwarding. Automatic region forwarding for run and plan [GH-1237]
+ * client: If Consul is available, automatically bootstrap Nomad Client
+ using the `_nomad` service in Consul. Nomad Servers now register
+ themselves with Consul to make this possible. [GH-1201]
+ * drivers: Qemu and Java can be run without an artifact being downloaded. Useful
+ if the artifact exists inside a chrooted directory [GH-1262]
+ * driver/docker: Added a client option to set SELinux labels for container
+ bind mounts. [GH-788]
+ * driver/docker: Enabled SSL by default for pulling images from docker
+ registries. [GH-1336]
+ * server: If Consul is available, automatically bootstrap Nomad Servers
+ using the `_nomad` service in Consul. [GH-1276]
+
+BUG FIXES:
+ * core: Improve garbage collection of allocations and nodes [GH-1256]
+ * core: Fix a potential deadlock if establishing leadership fails and is
+ retried [GH-1231]
+ * core: Do not restart successful batch jobs when the node is removed/drained
+ [GH-1205]
+ * core: Fix an issue in which the scheduler could be invoked with insufficient
+ state [GH-1339]
+ * core: Updated User, Meta or Resources in a task cause create/destroy updates
+ [GH-1128, GH-1153]
+ * core: Fix blocked evaluations being run without properly accounting for
+ priority [GH-1183]
+ * api: Tasks are no longer allowed to have slashes in their name [GH-1210]
+ * client: Delete tmp files used to communicate with executor [GH-1241]
+ * client: Prevent the client from restoring with incorrect task state [GH-1294]
+ * discovery: Ensure service and check names are unique [GH-1143, GH-1144]
+ * driver/docker: Ensure docker client doesn't time out after a minute.
+ [GH-1184]
+ * driver/java: Fix issue in which Java on darwin attempted to chroot [GH-1262]
+ * driver/docker: Fix issue in which logs could be spliced [GH-1322]
+
+## 0.3.2 (April 22, 2016)
+
+IMPROVEMENTS:
+ * core: Garbage collection partitioned to avoid system delays [GH-1012]
+ * core: Allow count zero task groups to enable blue/green deploys [GH-931]
+ * core: Validate driver configurations when submitting jobs [GH-1062, GH-1089]
+ * core: Job Deregister forces an evaluation for the job even if it doesn't
+ exist [GH-981]
+ * core: Rename successfully finished allocations to "Complete" rather than
+ "Dead" for clarity [GH-975]
+ * cli: `alloc-status` explains restart decisions [GH-984]
+ * cli: `node-drain -self` drains the local node [GH-1068]
+ * cli: `node-status -self` queries the local node [GH-1004]
+ * cli: Destructive commands now require confirmation [GH-983]
+ * cli: `alloc-status` display is less verbose by default [GH-946]
+ * cli: `server-members` displays the current leader in each region [GH-935]
+ * cli: `run` has an `-output` flag to emit a JSON version of the job [GH-990]
+ * cli: New `inspect` command to display a submitted job's specification
+ [GH-952]
+ * cli: `node-status` display is less verbose by default and shows a node's
+ total resources [GH-946]
+ * client: `artifact` source can be interpreted [GH-1070]
+ * client: Add IP and Port environment variables [GH-1099]
+ * client: Nomad fingerprinter to detect client's version [GH-965]
+ * client: Tasks can interpret Meta set in the task group and job [GH-985]
+ * client: All tasks in a task group are killed when a task fails [GH-962]
+ * client: Pass environment variables from host to exec based tasks [GH-970]
+ * client: Allow tasks to be run as a particular user [GH-950, GH-978]
+ * client: `artifact` block now supports downloading paths relative to the
+ task's directory [GH-944]
+ * docker: Timeout communications with Docker Daemon to avoid deadlocks with
+ misbehaving Docker Daemon [GH-1117]
+ * discovery: Support script based health checks [GH-986]
+ * discovery: Allowing registration of services which don't expose ports
+ [GH-1092]
+ * driver/docker: Support for `tty` and `interactive` options [GH-1059]
+ * jobspec: Improved validation of services referencing port labels [GH-1097]
+ * periodic: Periodic jobs are always evaluated in UTC timezone [GH-1074]
+
+BUG FIXES:
+ * core: Prevent garbage collection of running batch jobs [GH-989]
+ * core: Trigger System scheduler when Node drain is disabled [GH-1106]
+ * core: Fix issue where in-place updated allocation double counted resources
+ [GH-957]
+ * core: Fix drained, batched allocations from being migrated indefinitely
+ [GH-1086]
+ * client: Garbage collect Docker containers on exit [GH-1071]
+ * client: Fix common exec failures on CentOS and Amazon Linux [GH-1009]
+ * client: Fix S3 artifact downloading with IAM credentials [GH-1113]
+ * client: Fix handling of environment variables containing multiple equal
+ signs [GH-1115]
+
+## 0.3.1 (March 16, 2016)
+
+__BACKWARDS INCOMPATIBILITIES:__
+ * Service names that don't conform to RFC-1123 and RFC-2782 will fail
+ validation. To fix, change service name to conform to the RFCs before
+ running the job [GH-915]
+ * Jobs that downloaded artifacts will have to be updated to the new syntax and
+ be resubmitted. The new syntax consolidates artifacts to the `task` rather
+ than being duplicated inside each driver config [GH-921]
+
+IMPROVEMENTS:
+ * cli: Validate job file schemas [GH-900]
+ * client: Add environment variables for task name, allocation ID/Name/Index
+ [GH-869, GH-896]
+ * client: Starting task is retried under the restart policy if the error is
+ recoverable [GH-859]
+ * client: Allow tasks to download artifacts, which can be archives, prior to
+ starting [GH-921]
+ * config: Validate Nomad configuration files [GH-910]
+ * config: Client config allows reserving resources [GH-910]
+ * driver/docker: Support for ECR [GH-858]
+ * driver/docker: Periodic Fingerprinting [GH-893]
+ * driver/docker: Preventing port reservation for log collection on Unix platforms [GH-897]
+ * driver/rkt: Pass DNS information to rkt driver [GH-892]
+ * jobspec: Require RFC-1123 and RFC-2782 valid service names [GH-915]
+
+BUG FIXES:
+ * core: No longer cancel evaluations that are delayed in the plan queue
+ [GH-884]
+ * api: Guard client/fs/ APIs from being accessed on a non-client node [GH-890]
+ * client: Allow dashes in variable names during interpretation [GH-857]
+ * client: Updating kill timeout adheres to operator specified maximum value [GH-878]
+ * client: Fix a case in which clients would pull but not run allocations
+ [GH-906]
+ * consul: Remove concurrent map access [GH-874]
+ * driver/exec: Stopping tasks with more than one pid in a cgroup [GH-855]
+ * executor/linux: Add /run/resolvconf/ to chroot so DNS works [GH-905]
+
+## 0.3.0 (February 25, 2016)
+
+__BACKWARDS INCOMPATIBILITIES:__
+ * Stdout and Stderr log files of tasks have moved from task/local to
+ alloc/logs [GH-851]
+ * Any users of the runtime environment variable `$NOMAD_PORT_` will need to
+ update to the new `${NOMAD_ADDR_}` variable [GH-704]
+ * Service names that include periods will fail validation. To fix, remove any
+ periods from the service name before running the job [GH-770]
+ * Task resources are now validated and enforce minimum resources. If a job
+ specifies resources below the minimum they will need to be updated [GH-739]
+ * Node ID is no longer specifiable. For users who have set a custom Node
+ ID, the node should be drained before Nomad is updated and the data_dir
+ should be deleted before starting for the first time [GH-675]
+ * Users of custom restart policies should update to the new syntax which adds
+ a `mode` field. The `mode` can be either `fail` or `delay`. The default for
+ `batch` and `service` jobs is `fail` and `delay` respectively [GH-594]
+ * All jobs that interpret variables in constraints or driver configurations
+ will need to be updated to the new syntax which wraps the interpreted
+ variable in curly braces. (`$node.class` becomes `${node.class}`) [GH-760]
+
+IMPROVEMENTS:
+ * core: Populate job status [GH-663]
+ * core: Cgroup fingerprinter [GH-712]
+ * core: Node class constraint [GH-618]
+ * core: User specifiable kill timeout [GH-624]
+ * core: Job queueing via blocked evaluations [GH-726]
+ * core: Only reschedule failed batch allocations [GH-746]
+ * core: Add available nodes by DC to AllocMetrics [GH-619]
+ * core: Improve scheduler retry logic under contention [GH-787]
+ * core: Computed node class and stack optimization [GH-691, GH-708]
+ * core: Improved restart policy with more user configuration [GH-594]
+ * core: Periodic specification for jobs [GH-540, GH-657, GH-659, GH-668]
+ * core: Batch jobs are garbage collected from the Nomad Servers [GH-586]
+ * core: Free half the CPUs on leader node for use in plan queue and evaluation
+ broker [GH-812]
+ * core: Seed random number generator used to randomize node traversal order
+ during scheduling [GH-808]
+ * core: Performance improvements [GH-823, GH-825, GH-827, GH-830, GH-832,
+ GH-833, GH-834, GH-839]
+ * core/api: System garbage collection endpoint [GH-828]
+ * core/api: Allow users to set arbitrary headers via agent config [GH-699]
+ * core/cli: Prefix based lookups of allocs/nodes/evals/jobs [GH-575]
+ * core/cli: Print short identifiers and UX cleanup [GH-675, GH-693, GH-692]
+ * core/client: Client pulls minimum set of required allocations [GH-731]
+ * cli: Output of agent-info is sorted [GH-617]
+ * cli: Eval monitor detects zero wait condition [GH-776]
+ * cli: Ability to navigate allocation directories [GH-709, GH-798]
+ * client: Batch allocation updates to the server [GH-835]
+ * client: Log rotation for all drivers [GH-685, GH-763, GH-819]
+ * client: Only download artifacts from http, https, and S3 [GH-841]
+ * client: Create a tmp/ directory inside each task directory [GH-757]
+ * client: Store when an allocation was received by the client [GH-821]
+ * client: Heartbeating and saving state resilient under high load [GH-811]
+ * client: Handle updates to tasks Restart Policy and KillTimeout [GH-751]
+ * client: Killing a driver handle is retried with an exponential backoff
+ [GH-809]
+ * client: Send Node to server when periodic fingerprinters change Node
+ attributes/metadata [GH-749]
+ * client/api: File-system access to allocation directories [GH-669]
+ * drivers: Validate the "command" field contains a single value [GH-842]
+ * drivers: Interpret Nomad variables in environment variables/args [GH-653]
+ * driver/rkt: Add support for CPU/Memory isolation [GH-610]
+ * driver/rkt: Add support for mounting alloc/task directory [GH-645]
+ * driver/docker: Support for .dockercfg based auth for private registries
+ [GH-773]
+
+BUG FIXES:
+ * core: Node drain could only be partially applied [GH-750]
+ * core: Fix panic when eval Ack occurs at delivery limit [GH-790]
+ * cli: Handle parsing of un-named ports [GH-604]
+ * cli: Enforce absolute paths for data directories [GH-622]
+ * client: Cleanup of the allocation directory [GH-755]
+ * client: Improved stability under high contention [GH-789]
+ * client: Handle non-200 codes when parsing AWS metadata [GH-614]
+ * client: Unmount shared alloc dir when the client is rebooted [GH-755]
+ * client/consul: Service name changes handled properly [GH-766]
+ * driver/rkt: handle broader format of rkt version outputs [GH-745]
+ * driver/qemu: failed to load image and kvm accelerator fixes [GH-656]
+
+## 0.2.3 (December 17, 2015)
+
+BUG FIXES:
+ * core: Task States not being properly updated [GH-600]
+ * client: Fixes for user lookup to support CoreOS [GH-591]
+ * discovery: Using a random prefix for nomad managed services [GH-579]
+ * discovery: De-register tasks while Nomad sleeps before failed tasks are
+ restarted.
+ * discovery: Fixes for service registration when multiple allocations are bin
+ packed on a node [GH-583]
+ * configuration: Sort configuration files [GH-588]
+ * cli: RetryInterval was not being applied properly [GH-601]
+
+## 0.2.2 (December 11, 2015)
+
+IMPROVEMENTS:
+ * core: Enable `raw_exec` driver in dev mode [GH-558]
+ * cli: Server join/retry-join command line and config options [GH-527]
+ * cli: Nomad reports which config files are loaded at start time, or if none
+ are loaded [GH-536], [GH-553]
+
+BUG FIXES:
+ * core: Send syslog to `LOCAL0` by default as previously documented [GH-547]
+ * client: remove all calls to default logger [GH-570]
+ * consul: Nomad is less noisy when Consul is not running [GH-567]
+ * consul: Nomad only deregisters services that it created [GH-568]
+ * driver/exec: Shutdown a task now sends the interrupt signal first to the
+ process before forcefully killing it. [GH-543]
+ * driver/docker: Docker driver no longer leaks unix domain socket connections
+ [GH-556]
+ * fingerprint/network: Now correctly detects interfaces on Windows [GH-382]
+
+## 0.2.1 (November 28, 2015)
+
+IMPROVEMENTS:
+
+ * core: Can specify a whitelist for activating drivers [GH-467]
+ * core: Can specify a whitelist for activating fingerprinters [GH-488]
+ * core/api: Can list all known regions in the cluster [GH-495]
+ * client/spawn: spawn package tests made portable (work on Windows) [GH-442]
+ * client/executor: executor package tests made portable (work on Windows) [GH-497]
+ * client/driver: driver package tests made portable (work on windows) [GH-502]
+ * client/discovery: Added more consul client api configuration options [GH-503]
+ * driver/docker: Added TLS client options to the config file [GH-480]
+ * jobspec: More flexibility in naming Services [GH-509]
+
+BUG FIXES:
+
+ * core: Shared reference to DynamicPorts caused port conflicts when scheduling
+ count > 1 [GH-494]
+ * client/restart policy: Not restarting Batch Jobs if the exit code is 0 [GH-491]
+ * client/service discovery: Make Service IDs unique [GH-479]
+ * client/service: Fixes update to check definitions and services which are already registered [GH-498]
+ * driver/docker: Expose the container port instead of the host port [GH-466]
+ * driver/docker: Support `port_map` for static ports [GH-476]
+ * driver/docker: Pass 0.2.0-style port environment variables to the docker container [GH-476]
+ * jobspec: distinct_hosts constraint can be specified as a boolean (previously panicked) [GH-501]
+
+## 0.2.0 (November 18, 2015)
+
+__BACKWARDS INCOMPATIBILITIES:__
+
+ * core: HTTP API `/v1/node//allocations` returns full Allocation and not
+ stub [GH-402]
+ * core: Removed weight and hard/soft fields in constraints [GH-351]
+ * drivers: Qemu and Java driver configurations have been updated to both use
+ `artifact_source` as the source for external images/jars to be run
+ * jobspec: New reserved and dynamic port specification [GH-415]
+ * jobspec/drivers: Driver configuration supports arbitrary struct to be
+ passed in jobspec [GH-415]
+
+FEATURES:
+
+ * core: Blocking queries supported in API [GH-366]
+ * core: System Scheduler that runs tasks on every node [GH-287]
+ * core: Regexp, version and lexical ordering constraints [GH-271]
+ * core: distinctHost constraint ensures Task Groups are running on distinct
+ clients [GH-321]
+ * core: Service block definition with Consul registration [GH-463, GH-460,
+ GH-458, GH-455, GH-446, GH-425]
+ * client: GCE Fingerprinting [GH-215]
+ * client: Restart policy for task groups enforced by the client [GH-369,
+ GH-393]
+ * driver/rawexec: Raw Fork/Exec Driver [GH-237]
+ * driver/rkt: Experimental Rkt Driver [GH-165, GH-247]
+ * drivers: Add support for downloading external artifacts to execute for
+ Exec, Raw exec drivers [GH-381]
+
+IMPROVEMENTS:
+
+ * core: Configurable Node GC threshold [GH-362]
+ * core: Overlap plan verification and plan application for increased
+ throughput [GH-272]
+ * cli: Output of `alloc-status` also displays task state [GH-424]
+ * cli: Output of `server-members` is sorted [GH-323]
+ * cli: Show node attributes in `node-status` [GH-313]
+ * client/fingerprint: Network fingerprinter detects interface suitable for
+ use, rather than defaulting to eth0 [GH-334, GH-356]
+ * client: Client Restore State properly reattaches to tasks and recreates
+ them as needed [GH-364, GH-380, GH-388, GH-392, GH-394, GH-397, GH-408]
+ * client: Periodic Fingerprinting [GH-391]
+ * client: Precise snapshotting of TaskRunner and AllocRunner [GH-403, GH-411]
+ * client: Task State is tracked by client [GH-416]
+ * client: Test Skip Detection [GH-221]
+ * driver/docker: Can now specify auth for docker pull [GH-390]
+ * driver/docker: Can now specify DNS and DNSSearch options [GH-390]
+ * driver/docker: Can now specify the container's hostname [GH-426]
+ * driver/docker: Containers now have names based on the task name. [GH-389]
+ * driver/docker: Mount task local and alloc directory to docker containers [GH-290]
+ * driver/docker: Now accepts any value for `network_mode` to support userspace networking plugins in docker 1.9
+ * driver/java: Pass JVM options in java driver [GH-293, GH-297]
+ * drivers: Use BlkioWeight rather than BlkioThrottleReadIopsDevice [GH-222]
+ * jobspec and drivers: Driver configuration supports arbitrary struct to be passed in jobspec [GH-415]
+
+BUG FIXES:
+
+ * core: Nomad Client/Server RPC codec encodes strings properly [GH-420]
+ * core: Reset Nack timer in response to scheduler operations [GH-325]
+ * core: Scheduler checks for updates to environment variables [GH-327]
+ * cli: Fix crash when -config was given a directory or empty path [GH-119]
+ * client/fingerprint: Use correct local interface on OS X [GH-361, GH-365]
+ * client: Nomad Client doesn't restart failed containers [GH-198]
+ * client: Reap spawn-daemon process, avoiding a zombie process [GH-240]
+ * client: Resource exhausted errors because of link-speed zero [GH-146,
+ GH-205]
+ * client: Restarting Nomad Client leads to orphaned containers [GH-159]
+ * driver/docker: Apply SELinux label for mounting directories in docker
+ [GH-377]
+ * driver/docker: Docker driver exposes ports when creating container [GH-212,
+ GH-412]
+ * driver/docker: Docker driver uses docker environment variables correctly
+ [GH-407]
+ * driver/qemu: Qemu fingerprint and tests work on both windows/linux [GH-352]
+
+## 0.1.2 (October 6, 2015)
+
+IMPROVEMENTS:
+
+ * client: Nomad client cleans allocations on exit when in dev mode [GH-214]
+ * drivers: Use go-getter for artifact retrieval, add artifact support to
+ Exec, Raw Exec drivers [GH-288]
+
+## 0.1.1 (October 5, 2015)
+
+IMPROVEMENTS:
+
+ * cli: Nomad Client configurable from command-line [GH-191]
+ * client/fingerprint: Native IP detection and user specifiable network
+ interface for fingerprinting [GH-189]
+ * driver/docker: Docker networking mode is configurable [GH-184]
+ * drivers: Set task environment variables [GH-206]
+
+BUG FIXES:
+
+ * client/fingerprint: Network fingerprinting failed if default network
+ interface did not exist [GH-189]
+ * client: Fixed issue where network resources throughput would be set to 0
+ MBits if the link speed could not be determined [GH-205]
+ * client: Improved detection of Nomad binary [GH-181]
+ * driver/docker: Docker dynamic port mappings were not being set properly
+ [GH-199]
+
+## 0.1.0 (September 28, 2015)
+
+ * Initial release
+
diff --git a/vendor/github.com/hashicorp/nomad/GNUmakefile b/vendor/github.com/hashicorp/nomad/GNUmakefile
new file mode 100644
index 000000000..27471ea68
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/GNUmakefile
@@ -0,0 +1,85 @@
+PACKAGES = $(shell go list ./... | grep -v '/vendor/')
+VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods \
+ -nilfunc -printf -rangeloops -shift -structtags -unsafeptr
+EXTERNAL_TOOLS=\
+ github.com/kardianos/govendor \
+ github.com/mitchellh/gox \
+ golang.org/x/tools/cmd/cover \
+ github.com/axw/gocov/gocov \
+ gopkg.in/matm/v1/gocov-html \
+ github.com/ugorji/go/codec/codecgen
+
+GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*")
+
+all: test
+
+dev: format generate
+ @NOMAD_DEV=1 sh -c "'$(PWD)/scripts/build.sh'"
+
+bin: generate
+ @sh -c "'$(PWD)/scripts/build.sh'"
+
+release:
+ @$(MAKE) bin
+
+cov:
+ gocov test ./... | gocov-html > /tmp/coverage.html
+ open /tmp/coverage.html
+
+test: generate
+ @echo "--> Running go fmt" ;
+ @if [ -n "`go fmt ${PACKAGES}`" ]; then \
+ echo "[ERR] go fmt updated formatting. Please commit formatted code first."; \
+ exit 1; \
+ fi
+ @sh -c "'$(PWD)/scripts/test.sh'"
+ @$(MAKE) vet
+
+cover:
+ go list ./... | xargs -n1 go test --cover
+
+format:
+ @echo "--> Running go fmt"
+ @go fmt $(PACKAGES)
+
+generate:
+ @echo "--> Running go generate"
+ @go generate $(PACKAGES)
+ @sed -e 's|github.com/hashicorp/nomad/vendor/github.com/ugorji/go/codec|github.com/ugorji/go/codec|' nomad/structs/structs.generated.go >> structs.gen.tmp
+ @mv structs.gen.tmp nomad/structs/structs.generated.go
+
+vet:
+ @go tool vet 2>/dev/null ; if [ $$? -eq 3 ]; then \
+ go get golang.org/x/tools/cmd/vet; \
+ fi
+ @echo "--> Running go tool vet $(VETARGS) ${GOFILES_NOVENDOR}"
+ @go tool vet $(VETARGS) ${GOFILES_NOVENDOR} ; if [ $$? -eq 1 ]; then \
+ echo ""; \
+ echo "[LINT] Vet found suspicious constructs. Please check the reported constructs"; \
+ echo "and fix them if necessary before submitting the code for review."; \
+ fi
+
+ @git grep -n `echo "log"".Print"` | grep -v 'vendor/' ; if [ $$? -eq 0 ]; then \
+ echo "[LINT] Found "log"".Printf" calls. These should use Nomad's logger instead."; \
+ fi
+
+web:
+ ./scripts/website_run.sh
+
+web-push:
+ ./scripts/website_push.sh
+
+# bootstrap the build by downloading additional tools
+bootstrap:
+ @for tool in $(EXTERNAL_TOOLS) ; do \
+ echo "Installing $$tool" ; \
+ go get $$tool; \
+ done
+
+install: bin/nomad
+ install -o root -g wheel -m 0755 ./bin/nomad /usr/local/bin/nomad
+
+travis:
+ @sh -c "'$(PWD)/scripts/travis.sh'"
+
+.PHONY: all bin cov integ test vet web web-push test-nodep
diff --git a/vendor/github.com/hashicorp/nomad/ISSUE_TEMPLATE.md b/vendor/github.com/hashicorp/nomad/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..ba4e7c2a5
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/ISSUE_TEMPLATE.md
@@ -0,0 +1,18 @@
+If you have a question, prepend your issue with `[question]` or preferably use the [nomad mailing list](https://www.nomadproject.io/community.html).
+
+If filing a bug please include the following:
+
+### Nomad version
+Output from `nomad version`
+
+### Operating system and Environment details
+
+### Issue
+
+### Reproduction steps
+
+### Nomad Server logs (if appropriate)
+
+### Nomad Client logs (if appropriate)
+
+### Job file (if appropriate)
diff --git a/vendor/github.com/hashicorp/nomad/LICENSE b/vendor/github.com/hashicorp/nomad/LICENSE
new file mode 100644
index 000000000..e87a115e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/nomad/README.md b/vendor/github.com/hashicorp/nomad/README.md
new file mode 100644
index 000000000..9ab6888d3
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/README.md
@@ -0,0 +1,117 @@
+Nomad [![Build Status](https://travis-ci.org/hashicorp/nomad.svg)](https://travis-ci.org/hashicorp/nomad)
+=========
+
+- Website: https://www.nomadproject.io
+- IRC: `#nomad-tool` on Freenode
+- Mailing list: [Google Groups](https://groups.google.com/group/nomad-tool)
+
+![Nomad](https://raw.githubusercontent.com/hashicorp/nomad/master/website/source/assets/images/logo-header%402x.png?token=AAkIoLO_y1g3wgHMr3QO-559BN22rN0kks5V_2HpwA%3D%3D)
+
+Nomad is a cluster manager, designed for both long-lived services and
+short-lived batch processing workloads. Developers use a declarative job
+specification to submit work, and Nomad ensures constraints are satisfied and
+resource utilization is optimized by efficient task packing. Nomad supports
+all major operating systems and virtualized, containerized, or standalone
+applications.
+
+The key features of Nomad are:
+
+* **Docker Support**: Jobs can specify tasks which are Docker containers.
+ Nomad will automatically run the containers on clients which have Docker
+ installed, scale up and down based on the number of instances requested,
+ and automatically recover from failures.
+
+* **Multi-Datacenter and Multi-Region Aware**: Nomad is designed to be
+ a global-scale scheduler. Multiple datacenters can be managed as part
+ of a larger region, and jobs can be scheduled across datacenters if
+ requested. Multiple regions join together and federate jobs, making it
+ easy to run jobs anywhere.
+
+* **Operationally Simple**: Nomad runs as a single binary that can be
+ either a client or a server, and is completely self-contained. Nomad does
+ not require any external services for storage or coordination; it combines
+ the features of a resource manager and a scheduler in a single system.
+
+* **Distributed and Highly-Available**: Nomad servers cluster together and
+ perform leader election and state replication to provide high availability
+ in the face of failure. The Nomad scheduling engine is optimized for
+ optimistic concurrency, allowing all servers to make scheduling decisions,
+ which maximizes throughput.
+
+* **HashiCorp Ecosystem**: Nomad integrates with the entire HashiCorp
+ ecosystem of tools. Like all HashiCorp tools, Nomad follows the Unix
+ philosophy of doing one thing and doing it well. Nomad integrates with
+ tools like Packer, Consul, and Terraform to support building artifacts,
+ service discovery, monitoring, and capacity management.
+
+For more information, see the [introduction section](https://www.nomadproject.io/intro)
+of the Nomad website.
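+
+As a minimal sketch of how such a declarative specification is consumed, the
+following Go example parses a job file with the `jobspec` package; the file
+name `example.nomad` is hypothetical, and exact signatures may differ between
+Nomad versions.
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/nomad/jobspec"
+)
+
+func main() {
+	// Parse a declarative job specification (HCL) into Nomad's internal
+	// job structure. "example.nomad" is a hypothetical file path.
+	job, err := jobspec.ParseFile("example.nomad")
+	if err != nil {
+		log.Fatalf("failed to parse jobspec: %s", err)
+	}
+
+	// The parsed job carries the name, type, and task groups that the
+	// scheduler uses when packing tasks onto clients.
+	fmt.Printf("parsed job %q of type %q with %d task group(s)\n",
+		job.Name, job.Type, len(job.TaskGroups))
+}
+```
+
+Submitting a parsed job to a running cluster is handled separately through
+Nomad's HTTP API client package.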
+
+Getting Started & Documentation
+-------------------------------
+
+All documentation is available on the [Nomad website](https://www.nomadproject.io).
+
+Developing Nomad
+--------------------
+
+If you wish to work on Nomad itself or any of its built-in systems,
+you will first need [Go](https://www.golang.org) installed on your
+machine (version 1.5+ is *required*).
+
+**Developing with Vagrant**
+There is an included Vagrantfile that can help bootstrap the process. The
+created virtual machine is based on Ubuntu 14 and installs several of the
+base libraries that Nomad can use.
+
+To use this virtual machine, check out Nomad and run `vagrant up` from the
+root of the repository:
+
+```sh
+$ git clone https://github.com/hashicorp/nomad.git
+$ cd nomad
+$ vagrant up
+```
+
+The virtual machine will launch, and a provisioning script will install the
+needed dependencies.
+
+**Developing locally**
+For local development, first make sure Go is properly installed, including
+setting up a [GOPATH](https://golang.org/doc/code.html#GOPATH). After setting
+up Go, clone this repository into `$GOPATH/src/github.com/hashicorp/nomad`.
+Then you can download the required build tools, such as vet, cover, and godep,
+by bootstrapping your environment:
+
+```sh
+$ make bootstrap
+...
+```
+
+Afterwards, run `make test` to run the test suite. If it exits with status 0,
+everything is working!
+
+```sh
+$ make test
+...
+```
+
+To compile a development version of Nomad, run `make dev`. This will put the
+Nomad binary in the `bin` and `$GOPATH/bin` folders:
+
+```sh
+$ make dev
+...
+$ bin/nomad
+...
+```
+
+To cross-compile Nomad, run `make bin`. This will compile Nomad for multiple
+platforms and place the resulting binaries into the `./pkg` directory:
+
+```sh
+$ make bin
+...
+$ ls ./pkg
+...
+```
diff --git a/vendor/github.com/hashicorp/nomad/Vagrantfile b/vendor/github.com/hashicorp/nomad/Vagrantfile
new file mode 100644
index 000000000..b728f1941
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/Vagrantfile
@@ -0,0 +1,137 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2"
+
+DEFAULT_CPU_COUNT = 2
+$script = <