diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..930f9a2
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+vendor/*/
+
diff --git a/image.go b/image.go
index a1d69b3..c71df5d 100644
--- a/image.go
+++ b/image.go
@@ -100,7 +100,7 @@ func ImageInspect(imageID string) (Image, error) {
 	return img, err
 }
 
-func ImageRemove(imageID string) error {
-	_, err := client.ImageRemove(context.Background(), imageID, dockType.ImageRemoveOptions{Force: true, PruneChildren: true})
+func ImageRemove(imageID string, force bool) error {
+	_, err := client.ImageRemove(context.Background(), imageID, dockType.ImageRemoveOptions{Force: force, PruneChildren: true})
 	return err
 }
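
A minimal call-site sketch for the updated `ImageRemove` signature (the helper names and image IDs below are illustrative only, assumed to live in the same package as `image.go`; they are not part of this change):

```go
// Hypothetical call sites illustrating the new signature.

// Safe cleanup: with force=false the Docker daemon refuses to delete an image
// that is still referenced by a container, so the error is surfaced to the caller.
func cleanupImage(imageID string) error {
	return ImageRemove(imageID, false)
}

// Forced cleanup: equivalent to the old behaviour, which always set Force: true.
func purgeImage(imageID string) error {
	return ImageRemove(imageID, true)
}
```
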
diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
deleted file mode 100644
index f2c2bc2..0000000
--- a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
+++ /dev/null
@@ -1,66 +0,0 @@
-# 0.10.0
-
-* feature: Add a test hook (#180)
-* feature: `ParseLevel` is now case-insensitive (#326)
-* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
-* performance: avoid re-allocations on `WithFields` (#335)
-
-# 0.9.0
-
-* logrus/text_formatter: don't emit empty msg
-* logrus/hooks/airbrake: move out of main repository
-* logrus/hooks/sentry: move out of main repository
-* logrus/hooks/papertrail: move out of main repository
-* logrus/hooks/bugsnag: move out of main repository
-* logrus/core: run tests with `-race`
-* logrus/core: detect TTY based on `stderr`
-* logrus/core: support `WithError` on logger
-* logrus/core: Solaris support
-
-# 0.8.7
-
-* logrus/core: fix possible race (#216)
-* logrus/doc: small typo fixes and doc improvements
-
-
-# 0.8.6
-
-* hooks/raven: allow passing an initialized client
-
-# 0.8.5
-
-* logrus/core: revert #208
-
-# 0.8.4
-
-* formatter/text: fix data race (#218)
-
-# 0.8.3
-
-* logrus/core: fix entry log level (#208)
-* logrus/core: improve performance of text formatter by 40%
-* logrus/core: expose `LevelHooks` type
-* logrus/core: add support for DragonflyBSD and NetBSD
-* formatter/text: print structs more verbosely
-
-# 0.8.2
-
-* logrus: fix more Fatal family functions
-
-# 0.8.1
-
-* logrus: fix not exiting on `Fatalf` and `Fatalln`
-
-# 0.8.0
-
-* logrus: defaults to stderr instead of stdout
-* hooks/sentry: add special field for `*http.Request`
-* formatter/text: ignore Windows for colors
-
-# 0.7.3
-
-* formatter/\*: allow configuration of timestamp layout
-
-# 0.7.2
-
-* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE
deleted file mode 100644
index f090cb4..0000000
--- a/vendor/github.com/Sirupsen/logrus/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Simon Eskildsen
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md
deleted file mode 100644
index 05d678a..0000000
--- a/vendor/github.com/Sirupsen/logrus/README.md
+++ /dev/null
@@ -1,432 +0,0 @@
-# Logrus [Build Status](https://travis-ci.org/Sirupsen/logrus) [GoDoc](https://godoc.org/github.com/Sirupsen/logrus)
-
-**Seeing weird case-sensitive problems?** See [this
-issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021).
-This change has been reverted. I apologize for causing this. I greatly
-underestimated the impact this would have. Logrus strives for stability and
-backwards compatibility and failed to provide that.
-
-Logrus is a structured logger for Go (golang), completely API compatible with
-the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
-yet stable (pre 1.0). Logrus itself is completely stable and has been used in
-many large deployments. The core API is unlikely to change much but please
-version control your Logrus to make sure you aren't fetching latest `master` on
-every build.**
-
-Nicely color-coded in development (when a TTY is attached, otherwise just
-plain text):
-
-
-
-With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
-or Splunk:
-
-```json
-{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
-ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
-
-{"level":"warning","msg":"The group's number increased tremendously!",
-"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
-
-{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
-"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
-
-{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
-"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
-
-{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
-"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
-```
-
-With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
-attached, the output is compatible with the
-[logfmt](http://godoc.org/github.com/kr/logfmt) format:
-
-```text
-time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
-time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
-time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
-time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
-time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
-time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
-exit status 1
-```
-
-#### Example
-
-The simplest way to use Logrus is simply the package-level exported logger:
-
-```go
-package main
-
-import (
- log "github.com/Sirupsen/logrus"
-)
-
-func main() {
- log.WithFields(log.Fields{
- "animal": "walrus",
- }).Info("A walrus appears")
-}
-```
-
-Note that it's completely api-compatible with the stdlib logger, so you can
-replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
-and you'll now have the flexibility of Logrus. You can customize it all you
-want:
-
-```go
-package main
-
-import (
- "os"
- log "github.com/Sirupsen/logrus"
-)
-
-func init() {
- // Log as JSON instead of the default ASCII formatter.
- log.SetFormatter(&log.JSONFormatter{})
-
- // Output to stderr instead of stdout, could also be a file.
- log.SetOutput(os.Stderr)
-
- // Only log the warning severity or above.
- log.SetLevel(log.WarnLevel)
-}
-
-func main() {
- log.WithFields(log.Fields{
- "animal": "walrus",
- "size": 10,
- }).Info("A group of walrus emerges from the ocean")
-
- log.WithFields(log.Fields{
- "omg": true,
- "number": 122,
- }).Warn("The group's number increased tremendously!")
-
- log.WithFields(log.Fields{
- "omg": true,
- "number": 100,
- }).Fatal("The ice breaks!")
-
- // A common pattern is to re-use fields between logging statements by re-using
- // the logrus.Entry returned from WithFields()
- contextLogger := log.WithFields(log.Fields{
- "common": "this is a common field",
- "other": "I also should be logged always",
- })
-
- contextLogger.Info("I'll be logged with common and other field")
- contextLogger.Info("Me too")
-}
-```
-
-For more advanced usage such as logging to multiple locations from the same
-application, you can also create an instance of the `logrus` Logger:
-
-```go
-package main
-
-import (
- "github.com/Sirupsen/logrus"
-)
-
-// Create a new instance of the logger. You can have any number of instances.
-var log = logrus.New()
-
-func main() {
- // The API for setting attributes is a little different than the package level
- // exported logger. See Godoc.
- log.Out = os.Stderr
-
- log.WithFields(logrus.Fields{
- "animal": "walrus",
- "size": 10,
- }).Info("A group of walrus emerges from the ocean")
-}
-```
-
-#### Fields
-
-Logrus encourages careful, structured logging though logging fields instead of
-long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
-to send event %s to topic %s with key %d")`, you should log the much more
-discoverable:
-
-```go
-log.WithFields(log.Fields{
- "event": event,
- "topic": topic,
- "key": key,
-}).Fatal("Failed to send event")
-```
-
-We've found this API forces you to think about logging in a way that produces
-much more useful logging messages. We've been in countless situations where just
-a single added field to a log statement that was already there would've saved us
-hours. The `WithFields` call is optional.
-
-In general, with Logrus using any of the `printf`-family functions should be
-seen as a hint you should add a field, however, you can still use the
-`printf`-family functions with Logrus.
-
-#### Hooks
-
-You can add hooks for logging levels. For example to send errors to an exception
-tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
-multiple places simultaneously, e.g. syslog.
-
-Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
-`init`:
-
-```go
-import (
- log "github.com/Sirupsen/logrus"
- "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
- logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
- "log/syslog"
-)
-
-func init() {
-
- // Use the Airbrake hook to report errors that have Error severity or above to
- // an exception tracker. You can create custom hooks, see the Hooks section.
- log.AddHook(airbrake.NewHook(123, "xyz", "production"))
-
- hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
- if err != nil {
- log.Error("Unable to connect to local syslog daemon")
- } else {
- log.AddHook(hook)
- }
-}
-```
-Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
-
-| Hook | Description |
-| ----- | ----------- |
-| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
-| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
-| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
-| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
-| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
-| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
-| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
-| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
-| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
-| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
-| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
-| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
-| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
-| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
-| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
-| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
-| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
-| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
-| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) |
-| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
-| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
-| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
-| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
-| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
-| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
-| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
-| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
-| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
-| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
-| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
-| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
-| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
-| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
-| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
-
-
-#### Level logging
-
-Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
-
-```go
-log.Debug("Useful debugging information.")
-log.Info("Something noteworthy happened!")
-log.Warn("You should probably take a look at this.")
-log.Error("Something failed but I'm not quitting.")
-// Calls os.Exit(1) after logging
-log.Fatal("Bye.")
-// Calls panic() after logging
-log.Panic("I'm bailing.")
-```
-
-You can set the logging level on a `Logger`, then it will only log entries with
-that severity or anything above it:
-
-```go
-// Will log anything that is info or above (warn, error, fatal, panic). Default.
-log.SetLevel(log.InfoLevel)
-```
-
-It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
-environment if your application has that.
-
-#### Entries
-
-Besides the fields added with `WithField` or `WithFields` some fields are
-automatically added to all logging events:
-
-1. `time`. The timestamp when the entry was created.
-2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
- the `AddFields` call. E.g. `Failed to send event.`
-3. `level`. The logging level. E.g. `info`.
-
-#### Environments
-
-Logrus has no notion of environment.
-
-If you wish for hooks and formatters to only be used in specific environments,
-you should handle that yourself. For example, if your application has a global
-variable `Environment`, which is a string representation of the environment you
-could do:
-
-```go
-import (
- log "github.com/Sirupsen/logrus"
-)
-
-init() {
- // do something here to set environment depending on an environment variable
- // or command-line flag
- if Environment == "production" {
- log.SetFormatter(&log.JSONFormatter{})
- } else {
- // The TextFormatter is default, you don't actually have to do this.
- log.SetFormatter(&log.TextFormatter{})
- }
-}
-```
-
-This configuration is how `logrus` was intended to be used, but JSON in
-production is mostly only useful if you do log aggregation with tools like
-Splunk or Logstash.
-
-#### Formatters
-
-The built-in logging formatters are:
-
-* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
- without colors.
- * *Note:* to force colored output when there is no TTY, set the `ForceColors`
- field to `true`. To force no colored output even if there is a TTY set the
- `DisableColors` field to `true`
-* `logrus.JSONFormatter`. Logs fields as JSON.
-
-Third party logging formatters:
-
-* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
-* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
-* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
-
-You can define your formatter by implementing the `Formatter` interface,
-requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
-`Fields` type (`map[string]interface{}`) with all your fields as well as the
-default ones (see Entries section above):
-
-```go
-type MyJSONFormatter struct {
-}
-
-log.SetFormatter(new(MyJSONFormatter))
-
-func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
- // Note this doesn't include Time, Level and Message which are available on
- // the Entry. Consult `godoc` on information about those fields or read the
- // source of the official loggers.
- serialized, err := json.Marshal(entry.Data)
- if err != nil {
- return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
- }
- return append(serialized, '\n'), nil
-}
-```
-
-#### Logger as an `io.Writer`
-
-Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
-
-```go
-w := logger.Writer()
-defer w.Close()
-
-srv := http.Server{
- // create a stdlib log.Logger that writes to
- // logrus.Logger.
- ErrorLog: log.New(w, "", 0),
-}
-```
-
-Each line written to that writer will be printed the usual way, using formatters
-and hooks. The level for those entries is `info`.
-
-#### Rotation
-
-Log rotation is not provided with Logrus. Log rotation should be done by an
-external program (like `logrotate(8)`) that can compress and delete old log
-entries. It should not be a feature of the application-level logger.
-
-#### Tools
-
-| Tool | Description |
-| ---- | ----------- |
-|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
-|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper arround Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
-
-#### Testing
-
-Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
-
-* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
-* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
-
-```go
-logger, hook := NewNullLogger()
-logger.Error("Hello error")
-
-assert.Equal(1, len(hook.Entries))
-assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
-assert.Equal("Hello error", hook.LastEntry().Message)
-
-hook.Reset()
-assert.Nil(hook.LastEntry())
-```
-
-#### Fatal handlers
-
-Logrus can register one or more functions that will be called when any `fatal`
-level message is logged. The registered handlers will be executed before
-logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need
-to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted.
-
-```
-...
-handler := func() {
- // gracefully shutdown something...
-}
-logrus.RegisterExitHandler(handler)
-...
-```
-
-#### Thread safety
-
-By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs.
-If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
-
-Situation when locking is not needed includes:
-
-* You have no hooks registered, or hooks calling is already thread-safe.
-
-* Writing to logger.Out is already thread-safe, for example:
-
- 1) logger.Out is protected by locks.
-
- 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing)
-
- (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go
deleted file mode 100644
index b4c9e84..0000000
--- a/vendor/github.com/Sirupsen/logrus/alt_exit.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package logrus
-
-// The following code was sourced and modified from the
-// https://bitbucket.org/tebeka/atexit package governed by the following license:
-//
-// Copyright (c) 2012 Miki Tebeka .
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-// the Software, and to permit persons to whom the Software is furnished to do so,
-// subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-import (
- "fmt"
- "os"
-)
-
-var handlers = []func(){}
-
-func runHandler(handler func()) {
- defer func() {
- if err := recover(); err != nil {
- fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
- }
- }()
-
- handler()
-}
-
-func runHandlers() {
- for _, handler := range handlers {
- runHandler(handler)
- }
-}
-
-// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
-func Exit(code int) {
- runHandlers()
- os.Exit(code)
-}
-
-// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
-// all handlers. The handlers will also be invoked when any Fatal log entry is
-// made.
-//
-// This method is useful when a caller wishes to use logrus to log a fatal
-// message but also needs to gracefully shutdown. An example usecase could be
-// closing database connections, or sending a alert that the application is
-// closing.
-func RegisterExitHandler(handler func()) {
- handlers = append(handlers, handler)
-}
diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go
deleted file mode 100644
index dddd5f8..0000000
--- a/vendor/github.com/Sirupsen/logrus/doc.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
-Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
-
-
-The simplest way to use Logrus is simply the package-level exported logger:
-
- package main
-
- import (
- log "github.com/Sirupsen/logrus"
- )
-
- func main() {
- log.WithFields(log.Fields{
- "animal": "walrus",
- "number": 1,
- "size": 10,
- }).Info("A walrus appears")
- }
-
-Output:
- time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
-
-For a full guide visit https://github.com/Sirupsen/logrus
-*/
-package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go
deleted file mode 100644
index 4edbe7a..0000000
--- a/vendor/github.com/Sirupsen/logrus/entry.go
+++ /dev/null
@@ -1,275 +0,0 @@
-package logrus
-
-import (
- "bytes"
- "fmt"
- "os"
- "sync"
- "time"
-)
-
-var bufferPool *sync.Pool
-
-func init() {
- bufferPool = &sync.Pool{
- New: func() interface{} {
- return new(bytes.Buffer)
- },
- }
-}
-
-// Defines the key when adding errors using WithError.
-var ErrorKey = "error"
-
-// An entry is the final or intermediate Logrus logging entry. It contains all
-// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
-// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
-// passed around as much as you wish to avoid field duplication.
-type Entry struct {
- Logger *Logger
-
- // Contains all the fields set by the user.
- Data Fields
-
- // Time at which the log entry was created
- Time time.Time
-
- // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
- Level Level
-
- // Message passed to Debug, Info, Warn, Error, Fatal or Panic
- Message string
-
- // When formatter is called in entry.log(), an Buffer may be set to entry
- Buffer *bytes.Buffer
-}
-
-func NewEntry(logger *Logger) *Entry {
- return &Entry{
- Logger: logger,
- // Default is three fields, give a little extra room
- Data: make(Fields, 5),
- }
-}
-
-// Returns the string representation from the reader and ultimately the
-// formatter.
-func (entry *Entry) String() (string, error) {
- serialized, err := entry.Logger.Formatter.Format(entry)
- if err != nil {
- return "", err
- }
- str := string(serialized)
- return str, nil
-}
-
-// Add an error as single field (using the key defined in ErrorKey) to the Entry.
-func (entry *Entry) WithError(err error) *Entry {
- return entry.WithField(ErrorKey, err)
-}
-
-// Add a single field to the Entry.
-func (entry *Entry) WithField(key string, value interface{}) *Entry {
- return entry.WithFields(Fields{key: value})
-}
-
-// Add a map of fields to the Entry.
-func (entry *Entry) WithFields(fields Fields) *Entry {
- data := make(Fields, len(entry.Data)+len(fields))
- for k, v := range entry.Data {
- data[k] = v
- }
- for k, v := range fields {
- data[k] = v
- }
- return &Entry{Logger: entry.Logger, Data: data}
-}
-
-// This function is not declared with a pointer value because otherwise
-// race conditions will occur when using multiple goroutines
-func (entry Entry) log(level Level, msg string) {
- var buffer *bytes.Buffer
- entry.Time = time.Now()
- entry.Level = level
- entry.Message = msg
-
- if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
- entry.Logger.mu.Lock()
- fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
- entry.Logger.mu.Unlock()
- }
- buffer = bufferPool.Get().(*bytes.Buffer)
- buffer.Reset()
- defer bufferPool.Put(buffer)
- entry.Buffer = buffer
- serialized, err := entry.Logger.Formatter.Format(&entry)
- entry.Buffer = nil
- if err != nil {
- entry.Logger.mu.Lock()
- fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
- entry.Logger.mu.Unlock()
- } else {
- entry.Logger.mu.Lock()
- _, err = entry.Logger.Out.Write(serialized)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
- }
- entry.Logger.mu.Unlock()
- }
-
- // To avoid Entry#log() returning a value that only would make sense for
- // panic() to use in Entry#Panic(), we avoid the allocation by checking
- // directly here.
- if level <= PanicLevel {
- panic(&entry)
- }
-}
-
-func (entry *Entry) Debug(args ...interface{}) {
- if entry.Logger.Level >= DebugLevel {
- entry.log(DebugLevel, fmt.Sprint(args...))
- }
-}
-
-func (entry *Entry) Print(args ...interface{}) {
- entry.Info(args...)
-}
-
-func (entry *Entry) Info(args ...interface{}) {
- if entry.Logger.Level >= InfoLevel {
- entry.log(InfoLevel, fmt.Sprint(args...))
- }
-}
-
-func (entry *Entry) Warn(args ...interface{}) {
- if entry.Logger.Level >= WarnLevel {
- entry.log(WarnLevel, fmt.Sprint(args...))
- }
-}
-
-func (entry *Entry) Warning(args ...interface{}) {
- entry.Warn(args...)
-}
-
-func (entry *Entry) Error(args ...interface{}) {
- if entry.Logger.Level >= ErrorLevel {
- entry.log(ErrorLevel, fmt.Sprint(args...))
- }
-}
-
-func (entry *Entry) Fatal(args ...interface{}) {
- if entry.Logger.Level >= FatalLevel {
- entry.log(FatalLevel, fmt.Sprint(args...))
- }
- Exit(1)
-}
-
-func (entry *Entry) Panic(args ...interface{}) {
- if entry.Logger.Level >= PanicLevel {
- entry.log(PanicLevel, fmt.Sprint(args...))
- }
- panic(fmt.Sprint(args...))
-}
-
-// Entry Printf family functions
-
-func (entry *Entry) Debugf(format string, args ...interface{}) {
- if entry.Logger.Level >= DebugLevel {
- entry.Debug(fmt.Sprintf(format, args...))
- }
-}
-
-func (entry *Entry) Infof(format string, args ...interface{}) {
- if entry.Logger.Level >= InfoLevel {
- entry.Info(fmt.Sprintf(format, args...))
- }
-}
-
-func (entry *Entry) Printf(format string, args ...interface{}) {
- entry.Infof(format, args...)
-}
-
-func (entry *Entry) Warnf(format string, args ...interface{}) {
- if entry.Logger.Level >= WarnLevel {
- entry.Warn(fmt.Sprintf(format, args...))
- }
-}
-
-func (entry *Entry) Warningf(format string, args ...interface{}) {
- entry.Warnf(format, args...)
-}
-
-func (entry *Entry) Errorf(format string, args ...interface{}) {
- if entry.Logger.Level >= ErrorLevel {
- entry.Error(fmt.Sprintf(format, args...))
- }
-}
-
-func (entry *Entry) Fatalf(format string, args ...interface{}) {
- if entry.Logger.Level >= FatalLevel {
- entry.Fatal(fmt.Sprintf(format, args...))
- }
- Exit(1)
-}
-
-func (entry *Entry) Panicf(format string, args ...interface{}) {
- if entry.Logger.Level >= PanicLevel {
- entry.Panic(fmt.Sprintf(format, args...))
- }
-}
-
-// Entry Println family functions
-
-func (entry *Entry) Debugln(args ...interface{}) {
- if entry.Logger.Level >= DebugLevel {
- entry.Debug(entry.sprintlnn(args...))
- }
-}
-
-func (entry *Entry) Infoln(args ...interface{}) {
- if entry.Logger.Level >= InfoLevel {
- entry.Info(entry.sprintlnn(args...))
- }
-}
-
-func (entry *Entry) Println(args ...interface{}) {
- entry.Infoln(args...)
-}
-
-func (entry *Entry) Warnln(args ...interface{}) {
- if entry.Logger.Level >= WarnLevel {
- entry.Warn(entry.sprintlnn(args...))
- }
-}
-
-func (entry *Entry) Warningln(args ...interface{}) {
- entry.Warnln(args...)
-}
-
-func (entry *Entry) Errorln(args ...interface{}) {
- if entry.Logger.Level >= ErrorLevel {
- entry.Error(entry.sprintlnn(args...))
- }
-}
-
-func (entry *Entry) Fatalln(args ...interface{}) {
- if entry.Logger.Level >= FatalLevel {
- entry.Fatal(entry.sprintlnn(args...))
- }
- Exit(1)
-}
-
-func (entry *Entry) Panicln(args ...interface{}) {
- if entry.Logger.Level >= PanicLevel {
- entry.Panic(entry.sprintlnn(args...))
- }
-}
-
-// Sprintlnn => Sprint no newline. This is to get the behavior of how
-// fmt.Sprintln where spaces are always added between operands, regardless of
-// their type. Instead of vendoring the Sprintln implementation to spare a
-// string allocation, we do the simplest thing.
-func (entry *Entry) sprintlnn(args ...interface{}) string {
- msg := fmt.Sprintln(args...)
- return msg[:len(msg)-1]
-}
diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go
deleted file mode 100644
index 9a0120a..0000000
--- a/vendor/github.com/Sirupsen/logrus/exported.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package logrus
-
-import (
- "io"
-)
-
-var (
- // std is the name of the standard logger in stdlib `log`
- std = New()
-)
-
-func StandardLogger() *Logger {
- return std
-}
-
-// SetOutput sets the standard logger output.
-func SetOutput(out io.Writer) {
- std.mu.Lock()
- defer std.mu.Unlock()
- std.Out = out
-}
-
-// SetFormatter sets the standard logger formatter.
-func SetFormatter(formatter Formatter) {
- std.mu.Lock()
- defer std.mu.Unlock()
- std.Formatter = formatter
-}
-
-// SetLevel sets the standard logger level.
-func SetLevel(level Level) {
- std.mu.Lock()
- defer std.mu.Unlock()
- std.Level = level
-}
-
-// GetLevel returns the standard logger level.
-func GetLevel() Level {
- std.mu.Lock()
- defer std.mu.Unlock()
- return std.Level
-}
-
-// AddHook adds a hook to the standard logger hooks.
-func AddHook(hook Hook) {
- std.mu.Lock()
- defer std.mu.Unlock()
- std.Hooks.Add(hook)
-}
-
-// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
-func WithError(err error) *Entry {
- return std.WithField(ErrorKey, err)
-}
-
-// WithField creates an entry from the standard logger and adds a field to
-// it. If you want multiple fields, use `WithFields`.
-//
-// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
-// or Panic on the Entry it returns.
-func WithField(key string, value interface{}) *Entry {
- return std.WithField(key, value)
-}
-
-// WithFields creates an entry from the standard logger and adds multiple
-// fields to it. This is simply a helper for `WithField`, invoking it
-// once for each field.
-//
-// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
-// or Panic on the Entry it returns.
-func WithFields(fields Fields) *Entry {
- return std.WithFields(fields)
-}
-
-// Debug logs a message at level Debug on the standard logger.
-func Debug(args ...interface{}) {
- std.Debug(args...)
-}
-
-// Print logs a message at level Info on the standard logger.
-func Print(args ...interface{}) {
- std.Print(args...)
-}
-
-// Info logs a message at level Info on the standard logger.
-func Info(args ...interface{}) {
- std.Info(args...)
-}
-
-// Warn logs a message at level Warn on the standard logger.
-func Warn(args ...interface{}) {
- std.Warn(args...)
-}
-
-// Warning logs a message at level Warn on the standard logger.
-func Warning(args ...interface{}) {
- std.Warning(args...)
-}
-
-// Error logs a message at level Error on the standard logger.
-func Error(args ...interface{}) {
- std.Error(args...)
-}
-
-// Panic logs a message at level Panic on the standard logger.
-func Panic(args ...interface{}) {
- std.Panic(args...)
-}
-
-// Fatal logs a message at level Fatal on the standard logger.
-func Fatal(args ...interface{}) {
- std.Fatal(args...)
-}
-
-// Debugf logs a message at level Debug on the standard logger.
-func Debugf(format string, args ...interface{}) {
- std.Debugf(format, args...)
-}
-
-// Printf logs a message at level Info on the standard logger.
-func Printf(format string, args ...interface{}) {
- std.Printf(format, args...)
-}
-
-// Infof logs a message at level Info on the standard logger.
-func Infof(format string, args ...interface{}) {
- std.Infof(format, args...)
-}
-
-// Warnf logs a message at level Warn on the standard logger.
-func Warnf(format string, args ...interface{}) {
- std.Warnf(format, args...)
-}
-
-// Warningf logs a message at level Warn on the standard logger.
-func Warningf(format string, args ...interface{}) {
- std.Warningf(format, args...)
-}
-
-// Errorf logs a message at level Error on the standard logger.
-func Errorf(format string, args ...interface{}) {
- std.Errorf(format, args...)
-}
-
-// Panicf logs a message at level Panic on the standard logger.
-func Panicf(format string, args ...interface{}) {
- std.Panicf(format, args...)
-}
-
-// Fatalf logs a message at level Fatal on the standard logger.
-func Fatalf(format string, args ...interface{}) {
- std.Fatalf(format, args...)
-}
-
-// Debugln logs a message at level Debug on the standard logger.
-func Debugln(args ...interface{}) {
- std.Debugln(args...)
-}
-
-// Println logs a message at level Info on the standard logger.
-func Println(args ...interface{}) {
- std.Println(args...)
-}
-
-// Infoln logs a message at level Info on the standard logger.
-func Infoln(args ...interface{}) {
- std.Infoln(args...)
-}
-
-// Warnln logs a message at level Warn on the standard logger.
-func Warnln(args ...interface{}) {
- std.Warnln(args...)
-}
-
-// Warningln logs a message at level Warn on the standard logger.
-func Warningln(args ...interface{}) {
- std.Warningln(args...)
-}
-
-// Errorln logs a message at level Error on the standard logger.
-func Errorln(args ...interface{}) {
- std.Errorln(args...)
-}
-
-// Panicln logs a message at level Panic on the standard logger.
-func Panicln(args ...interface{}) {
- std.Panicln(args...)
-}
-
-// Fatalln logs a message at level Fatal on the standard logger.
-func Fatalln(args ...interface{}) {
- std.Fatalln(args...)
-}
diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go
deleted file mode 100644
index b5fbe93..0000000
--- a/vendor/github.com/Sirupsen/logrus/formatter.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package logrus
-
-import "time"
-
-const DefaultTimestampFormat = time.RFC3339
-
-// The Formatter interface is used to implement a custom Formatter. It takes an
-// `Entry`. It exposes all the fields, including the default ones:
-//
-// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
-// * `entry.Data["time"]`. The timestamp.
-// * `entry.Data["level"]. The level the entry was logged at.
-//
-// Any additional fields added with `WithField` or `WithFields` are also in
-// `entry.Data`. Format is expected to return an array of bytes which are then
-// logged to `logger.Out`.
-type Formatter interface {
- Format(*Entry) ([]byte, error)
-}
-
-// This is to not silently overwrite `time`, `msg` and `level` fields when
-// dumping it. If this code wasn't there doing:
-//
-// logrus.WithField("level", 1).Info("hello")
-//
-// Would just silently drop the user provided level. Instead with this code
-// it'll logged as:
-//
-// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
-//
-// It's not exported because it's still using Data in an opinionated way. It's to
-// avoid code duplication between the two default formatters.
-func prefixFieldClashes(data Fields) {
- if t, ok := data["time"]; ok {
- data["fields.time"] = t
- }
-
- if m, ok := data["msg"]; ok {
- data["fields.msg"] = m
- }
-
- if l, ok := data["level"]; ok {
- data["fields.level"] = l
- }
-}
diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go
deleted file mode 100644
index 3f151cd..0000000
--- a/vendor/github.com/Sirupsen/logrus/hooks.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package logrus
-
-// A hook to be fired when logging on the logging levels returned from
-// `Levels()` on your implementation of the interface. Note that this is not
-// fired in a goroutine or a channel with workers, you should handle such
-// functionality yourself if your call is non-blocking and you don't wish for
-// the logging calls for levels returned from `Levels()` to block.
-type Hook interface {
- Levels() []Level
- Fire(*Entry) error
-}
-
-// Internal type for storing the hooks on a logger instance.
-type LevelHooks map[Level][]Hook
-
-// Add a hook to an instance of logger. This is called with
-// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
-func (hooks LevelHooks) Add(hook Hook) {
- for _, level := range hook.Levels() {
- hooks[level] = append(hooks[level], hook)
- }
-}
-
-// Fire all the hooks for the passed level. Used by `entry.log` to fire
-// appropriate hooks for a log entry.
-func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
- for _, hook := range hooks[level] {
- if err := hook.Fire(entry); err != nil {
- return err
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go
deleted file mode 100644
index 266554e..0000000
--- a/vendor/github.com/Sirupsen/logrus/json_formatter.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package logrus
-
-import (
- "encoding/json"
- "fmt"
-)
-
-type fieldKey string
-type FieldMap map[fieldKey]string
-
-const (
- FieldKeyMsg = "msg"
- FieldKeyLevel = "level"
- FieldKeyTime = "time"
-)
-
-func (f FieldMap) resolve(key fieldKey) string {
- if k, ok := f[key]; ok {
- return k
- }
-
- return string(key)
-}
-
-type JSONFormatter struct {
- // TimestampFormat sets the format used for marshaling timestamps.
- TimestampFormat string
-
- // DisableTimestamp allows disabling automatic timestamps in output
- DisableTimestamp bool
-
- // FieldMap allows users to customize the names of keys for various fields.
- // As an example:
- // formatter := &JSONFormatter{
- // FieldMap: FieldMap{
- // FieldKeyTime: "@timestamp",
- // FieldKeyLevel: "@level",
- // FieldKeyLevel: "@message",
- // },
- // }
- FieldMap FieldMap
-}
-
-func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
- data := make(Fields, len(entry.Data)+3)
- for k, v := range entry.Data {
- switch v := v.(type) {
- case error:
- // Otherwise errors are ignored by `encoding/json`
- // https://github.com/Sirupsen/logrus/issues/137
- data[k] = v.Error()
- default:
- data[k] = v
- }
- }
- prefixFieldClashes(data)
-
- timestampFormat := f.TimestampFormat
- if timestampFormat == "" {
- timestampFormat = DefaultTimestampFormat
- }
-
- if !f.DisableTimestamp {
- data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
- }
- data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
- data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
-
- serialized, err := json.Marshal(data)
- if err != nil {
- return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
- }
- return append(serialized, '\n'), nil
-}
diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go
deleted file mode 100644
index b769f3d..0000000
--- a/vendor/github.com/Sirupsen/logrus/logger.go
+++ /dev/null
@@ -1,308 +0,0 @@
-package logrus
-
-import (
- "io"
- "os"
- "sync"
-)
-
-type Logger struct {
- // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
- // file, or leave it default which is `os.Stderr`. You can also set this to
- // something more adventorous, such as logging to Kafka.
- Out io.Writer
- // Hooks for the logger instance. These allow firing events based on logging
- // levels and log entries. For example, to send errors to an error tracking
- // service, log to StatsD or dump the core on fatal errors.
- Hooks LevelHooks
- // All log entries pass through the formatter before logged to Out. The
- // included formatters are `TextFormatter` and `JSONFormatter` for which
- // TextFormatter is the default. In development (when a TTY is attached) it
- // logs with colors, but to a file it wouldn't. You can easily implement your
- // own that implements the `Formatter` interface, see the `README` or included
- // formatters for examples.
- Formatter Formatter
- // The logging level the logger should log at. This is typically (and defaults
- // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
- // logged. `logrus.Debug` is useful in
- Level Level
- // Used to sync writing to the log. Locking is enabled by Default
- mu MutexWrap
- // Reusable empty entry
- entryPool sync.Pool
-}
-
-type MutexWrap struct {
- lock sync.Mutex
- disabled bool
-}
-
-func (mw *MutexWrap) Lock() {
- if !mw.disabled {
- mw.lock.Lock()
- }
-}
-
-func (mw *MutexWrap) Unlock() {
- if !mw.disabled {
- mw.lock.Unlock()
- }
-}
-
-func (mw *MutexWrap) Disable() {
- mw.disabled = true
-}
-
-// Creates a new logger. Configuration should be set by changing `Formatter`,
-// `Out` and `Hooks` directly on the default logger instance. You can also just
-// instantiate your own:
-//
-// var log = &Logger{
-// Out: os.Stderr,
-// Formatter: new(JSONFormatter),
-// Hooks: make(LevelHooks),
-// Level: logrus.DebugLevel,
-// }
-//
-// It's recommended to make this a global instance called `log`.
-func New() *Logger {
- return &Logger{
- Out: os.Stderr,
- Formatter: new(TextFormatter),
- Hooks: make(LevelHooks),
- Level: InfoLevel,
- }
-}
-
-func (logger *Logger) newEntry() *Entry {
- entry, ok := logger.entryPool.Get().(*Entry)
- if ok {
- return entry
- }
- return NewEntry(logger)
-}
-
-func (logger *Logger) releaseEntry(entry *Entry) {
- logger.entryPool.Put(entry)
-}
-
-// Adds a field to the log entry, note that it doesn't log until you call
-// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
-// If you want multiple fields, use `WithFields`.
-func (logger *Logger) WithField(key string, value interface{}) *Entry {
- entry := logger.newEntry()
- defer logger.releaseEntry(entry)
- return entry.WithField(key, value)
-}
-
-// Adds a struct of fields to the log entry. All it does is call `WithField` for
-// each `Field`.
-func (logger *Logger) WithFields(fields Fields) *Entry {
- entry := logger.newEntry()
- defer logger.releaseEntry(entry)
- return entry.WithFields(fields)
-}
-
-// Add an error as single field to the log entry. All it does is call
-// `WithError` for the given `error`.
-func (logger *Logger) WithError(err error) *Entry {
- entry := logger.newEntry()
- defer logger.releaseEntry(entry)
- return entry.WithError(err)
-}
-
-func (logger *Logger) Debugf(format string, args ...interface{}) {
- if logger.Level >= DebugLevel {
- entry := logger.newEntry()
- entry.Debugf(format, args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Infof(format string, args ...interface{}) {
- if logger.Level >= InfoLevel {
- entry := logger.newEntry()
- entry.Infof(format, args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Printf(format string, args ...interface{}) {
- entry := logger.newEntry()
- entry.Printf(format, args...)
- logger.releaseEntry(entry)
-}
-
-func (logger *Logger) Warnf(format string, args ...interface{}) {
- if logger.Level >= WarnLevel {
- entry := logger.newEntry()
- entry.Warnf(format, args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Warningf(format string, args ...interface{}) {
- if logger.Level >= WarnLevel {
- entry := logger.newEntry()
- entry.Warnf(format, args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Errorf(format string, args ...interface{}) {
- if logger.Level >= ErrorLevel {
- entry := logger.newEntry()
- entry.Errorf(format, args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Fatalf(format string, args ...interface{}) {
- if logger.Level >= FatalLevel {
- entry := logger.newEntry()
- entry.Fatalf(format, args...)
- logger.releaseEntry(entry)
- }
- Exit(1)
-}
-
-func (logger *Logger) Panicf(format string, args ...interface{}) {
- if logger.Level >= PanicLevel {
- entry := logger.newEntry()
- entry.Panicf(format, args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Debug(args ...interface{}) {
- if logger.Level >= DebugLevel {
- entry := logger.newEntry()
- entry.Debug(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Info(args ...interface{}) {
- if logger.Level >= InfoLevel {
- entry := logger.newEntry()
- entry.Info(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Print(args ...interface{}) {
- entry := logger.newEntry()
- entry.Info(args...)
- logger.releaseEntry(entry)
-}
-
-func (logger *Logger) Warn(args ...interface{}) {
- if logger.Level >= WarnLevel {
- entry := logger.newEntry()
- entry.Warn(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Warning(args ...interface{}) {
- if logger.Level >= WarnLevel {
- entry := logger.newEntry()
- entry.Warn(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Error(args ...interface{}) {
- if logger.Level >= ErrorLevel {
- entry := logger.newEntry()
- entry.Error(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Fatal(args ...interface{}) {
- if logger.Level >= FatalLevel {
- entry := logger.newEntry()
- entry.Fatal(args...)
- logger.releaseEntry(entry)
- }
- Exit(1)
-}
-
-func (logger *Logger) Panic(args ...interface{}) {
- if logger.Level >= PanicLevel {
- entry := logger.newEntry()
- entry.Panic(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Debugln(args ...interface{}) {
- if logger.Level >= DebugLevel {
- entry := logger.newEntry()
- entry.Debugln(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Infoln(args ...interface{}) {
- if logger.Level >= InfoLevel {
- entry := logger.newEntry()
- entry.Infoln(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Println(args ...interface{}) {
- entry := logger.newEntry()
- entry.Println(args...)
- logger.releaseEntry(entry)
-}
-
-func (logger *Logger) Warnln(args ...interface{}) {
- if logger.Level >= WarnLevel {
- entry := logger.newEntry()
- entry.Warnln(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Warningln(args ...interface{}) {
- if logger.Level >= WarnLevel {
- entry := logger.newEntry()
- entry.Warnln(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Errorln(args ...interface{}) {
- if logger.Level >= ErrorLevel {
- entry := logger.newEntry()
- entry.Errorln(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Fatalln(args ...interface{}) {
- if logger.Level >= FatalLevel {
- entry := logger.newEntry()
- entry.Fatalln(args...)
- logger.releaseEntry(entry)
- }
- Exit(1)
-}
-
-func (logger *Logger) Panicln(args ...interface{}) {
- if logger.Level >= PanicLevel {
- entry := logger.newEntry()
- entry.Panicln(args...)
- logger.releaseEntry(entry)
- }
-}
-
-//When file is opened with appending mode, it's safe to
-//write concurrently to a file (within 4k message on Linux).
-//In these cases user can choose to disable the lock.
-func (logger *Logger) SetNoLock() {
- logger.mu.Disable()
-}
diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go
deleted file mode 100644
index e596691..0000000
--- a/vendor/github.com/Sirupsen/logrus/logrus.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package logrus
-
-import (
- "fmt"
- "log"
- "strings"
-)
-
-// Fields type, used to pass to `WithFields`.
-type Fields map[string]interface{}
-
-// Level type
-type Level uint8
-
-// Convert the Level to a string. E.g. PanicLevel becomes "panic".
-func (level Level) String() string {
- switch level {
- case DebugLevel:
- return "debug"
- case InfoLevel:
- return "info"
- case WarnLevel:
- return "warning"
- case ErrorLevel:
- return "error"
- case FatalLevel:
- return "fatal"
- case PanicLevel:
- return "panic"
- }
-
- return "unknown"
-}
-
-// ParseLevel takes a string level and returns the Logrus log level constant.
-func ParseLevel(lvl string) (Level, error) {
- switch strings.ToLower(lvl) {
- case "panic":
- return PanicLevel, nil
- case "fatal":
- return FatalLevel, nil
- case "error":
- return ErrorLevel, nil
- case "warn", "warning":
- return WarnLevel, nil
- case "info":
- return InfoLevel, nil
- case "debug":
- return DebugLevel, nil
- }
-
- var l Level
- return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
-}
-
-// A constant exposing all logging levels
-var AllLevels = []Level{
- PanicLevel,
- FatalLevel,
- ErrorLevel,
- WarnLevel,
- InfoLevel,
- DebugLevel,
-}
-
-// These are the different logging levels. You can set the logging level to log
-// on your instance of logger, obtained with `logrus.New()`.
-const (
- // PanicLevel level, highest level of severity. Logs and then calls panic with the
- // message passed to Debug, Info, ...
- PanicLevel Level = iota
- // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
- // logging level is set to Panic.
- FatalLevel
- // ErrorLevel level. Logs. Used for errors that should definitely be noted.
- // Commonly used for hooks to send errors to an error tracking service.
- ErrorLevel
- // WarnLevel level. Non-critical entries that deserve eyes.
- WarnLevel
- // InfoLevel level. General operational entries about what's going on inside the
- // application.
- InfoLevel
- // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
- DebugLevel
-)
-
-// Won't compile if StdLogger can't be realized by a log.Logger
-var (
- _ StdLogger = &log.Logger{}
- _ StdLogger = &Entry{}
- _ StdLogger = &Logger{}
-)
-
-// StdLogger is what your logrus-enabled library should take, that way
-// it'll accept a stdlib logger and a logrus logger. There's no standard
-// interface, this is the closest we get, unfortunately.
-type StdLogger interface {
- Print(...interface{})
- Printf(string, ...interface{})
- Println(...interface{})
-
- Fatal(...interface{})
- Fatalf(string, ...interface{})
- Fatalln(...interface{})
-
- Panic(...interface{})
- Panicf(string, ...interface{})
- Panicln(...interface{})
-}
-
-// The FieldLogger interface generalizes the Entry and Logger types
-type FieldLogger interface {
- WithField(key string, value interface{}) *Entry
- WithFields(fields Fields) *Entry
- WithError(err error) *Entry
-
- Debugf(format string, args ...interface{})
- Infof(format string, args ...interface{})
- Printf(format string, args ...interface{})
- Warnf(format string, args ...interface{})
- Warningf(format string, args ...interface{})
- Errorf(format string, args ...interface{})
- Fatalf(format string, args ...interface{})
- Panicf(format string, args ...interface{})
-
- Debug(args ...interface{})
- Info(args ...interface{})
- Print(args ...interface{})
- Warn(args ...interface{})
- Warning(args ...interface{})
- Error(args ...interface{})
- Fatal(args ...interface{})
- Panic(args ...interface{})
-
- Debugln(args ...interface{})
- Infoln(args ...interface{})
- Println(args ...interface{})
- Warnln(args ...interface{})
- Warningln(args ...interface{})
- Errorln(args ...interface{})
- Fatalln(args ...interface{})
- Panicln(args ...interface{})
-}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
deleted file mode 100644
index 1960169..0000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build appengine
-
-package logrus
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal() bool {
- return true
-}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
deleted file mode 100644
index 5f6be4d..0000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
-
-package logrus
-
-import "syscall"
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-type Termios syscall.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go
deleted file mode 100644
index 308160c..0000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_linux.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package logrus
-
-import "syscall"
-
-const ioctlReadTermios = syscall.TCGETS
-
-type Termios syscall.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
deleted file mode 100644
index 329038f..0000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
-
-package logrus
-
-import (
- "syscall"
- "unsafe"
-)
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal() bool {
- fd := syscall.Stderr
- var termios Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
deleted file mode 100644
index a3c6f6e..0000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build solaris,!appengine
-
-package logrus
-
-import (
- "os"
-
- "golang.org/x/sys/unix"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal() bool {
- _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
- return err == nil
-}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
deleted file mode 100644
index 3727e8a..0000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_windows.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows,!appengine
-
-package logrus
-
-import (
- "syscall"
- "unsafe"
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-
-var (
- procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
-)
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal() bool {
- fd := syscall.Stderr
- var st uint32
- r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- return r != 0 && e == 0
-}
diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go
deleted file mode 100644
index 9114b3c..0000000
--- a/vendor/github.com/Sirupsen/logrus/text_formatter.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package logrus
-
-import (
- "bytes"
- "fmt"
- "runtime"
- "sort"
- "strings"
- "time"
-)
-
-const (
- nocolor = 0
- red = 31
- green = 32
- yellow = 33
- blue = 34
- gray = 37
-)
-
-var (
- baseTimestamp time.Time
- isTerminal bool
-)
-
-func init() {
- baseTimestamp = time.Now()
- isTerminal = IsTerminal()
-}
-
-func miniTS() int {
- return int(time.Since(baseTimestamp) / time.Second)
-}
-
-type TextFormatter struct {
- // Set to true to bypass checking for a TTY before outputting colors.
- ForceColors bool
-
- // Force disabling colors.
- DisableColors bool
-
- // Disable timestamp logging. useful when output is redirected to logging
- // system that already adds timestamps.
- DisableTimestamp bool
-
- // Enable logging the full timestamp when a TTY is attached instead of just
- // the time passed since beginning of execution.
- FullTimestamp bool
-
- // TimestampFormat to use for display when a full timestamp is printed
- TimestampFormat string
-
- // The fields are sorted by default for a consistent output. For applications
- // that log extremely frequently and don't use the JSON formatter this may not
- // be desired.
- DisableSorting bool
-}
-
-func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
- var b *bytes.Buffer
- var keys []string = make([]string, 0, len(entry.Data))
- for k := range entry.Data {
- keys = append(keys, k)
- }
-
- if !f.DisableSorting {
- sort.Strings(keys)
- }
- if entry.Buffer != nil {
- b = entry.Buffer
- } else {
- b = &bytes.Buffer{}
- }
-
- prefixFieldClashes(entry.Data)
-
- isColorTerminal := isTerminal && (runtime.GOOS != "windows")
- isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
-
- timestampFormat := f.TimestampFormat
- if timestampFormat == "" {
- timestampFormat = DefaultTimestampFormat
- }
- if isColored {
- f.printColored(b, entry, keys, timestampFormat)
- } else {
- if !f.DisableTimestamp {
- f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
- }
- f.appendKeyValue(b, "level", entry.Level.String())
- if entry.Message != "" {
- f.appendKeyValue(b, "msg", entry.Message)
- }
- for _, key := range keys {
- f.appendKeyValue(b, key, entry.Data[key])
- }
- }
-
- b.WriteByte('\n')
- return b.Bytes(), nil
-}
-
-func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
- var levelColor int
- switch entry.Level {
- case DebugLevel:
- levelColor = gray
- case WarnLevel:
- levelColor = yellow
- case ErrorLevel, FatalLevel, PanicLevel:
- levelColor = red
- default:
- levelColor = blue
- }
-
- levelText := strings.ToUpper(entry.Level.String())[0:4]
-
- if !f.FullTimestamp {
- fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
- } else {
- fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
- }
- for _, k := range keys {
- v := entry.Data[k]
- fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
- f.appendValue(b, v)
- }
-}
-
-func needsQuoting(text string) bool {
- for _, ch := range text {
- if !((ch >= 'a' && ch <= 'z') ||
- (ch >= 'A' && ch <= 'Z') ||
- (ch >= '0' && ch <= '9') ||
- ch == '-' || ch == '.') {
- return true
- }
- }
- return false
-}
-
-func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
-
- b.WriteString(key)
- b.WriteByte('=')
- f.appendValue(b, value)
- b.WriteByte(' ')
-}
-
-func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
- switch value := value.(type) {
- case string:
- if !needsQuoting(value) {
- b.WriteString(value)
- } else {
- fmt.Fprintf(b, "%q", value)
- }
- case error:
- errmsg := value.Error()
- if !needsQuoting(errmsg) {
- b.WriteString(errmsg)
- } else {
- fmt.Fprintf(b, "%q", errmsg)
- }
- default:
- fmt.Fprint(b, value)
- }
-}
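
The TextFormatter fields above are plain configuration knobs; a short sketch of setting them on a logger (field names taken from the struct above, values illustrative):

    package main

    import "github.com/Sirupsen/logrus"

    func main() {
        log := logrus.New()
        log.Formatter = &logrus.TextFormatter{
            FullTimestamp:   true,                  // log entry.Time, not seconds since start
            DisableColors:   true,                  // always use the key=value layout
            TimestampFormat: "2006-01-02 15:04:05", // used when FullTimestamp is set
        }
        log.WithField("component", "example").Info("text formatter configured")
    }
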
diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go
deleted file mode 100644
index f74d2aa..0000000
--- a/vendor/github.com/Sirupsen/logrus/writer.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package logrus
-
-import (
- "bufio"
- "io"
- "runtime"
-)
-
-func (logger *Logger) Writer() *io.PipeWriter {
- return logger.WriterLevel(InfoLevel)
-}
-
-func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
- reader, writer := io.Pipe()
-
- var printFunc func(args ...interface{})
- switch level {
- case DebugLevel:
- printFunc = logger.Debug
- case InfoLevel:
- printFunc = logger.Info
- case WarnLevel:
- printFunc = logger.Warn
- case ErrorLevel:
- printFunc = logger.Error
- case FatalLevel:
- printFunc = logger.Fatal
- case PanicLevel:
- printFunc = logger.Panic
- default:
- printFunc = logger.Print
- }
-
- go logger.writerScanner(reader, printFunc)
- runtime.SetFinalizer(writer, writerFinalizer)
-
- return writer
-}
-
-func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
- scanner := bufio.NewScanner(reader)
- for scanner.Scan() {
- printFunc(scanner.Text())
- }
- if err := scanner.Err(); err != nil {
- logger.Errorf("Error while reading from Writer: %s", err)
- }
- reader.Close()
-}
-
-func writerFinalizer(writer *io.PipeWriter) {
- writer.Close()
-}
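
WriterLevel hands back a pipe whose read side is scanned line by line and re-logged at the chosen level, which is how stdlib-style writers are funneled into logrus. A sketch, using only the API shown above:

    package main

    import (
        "log"

        "github.com/Sirupsen/logrus"
    )

    func main() {
        logger := logrus.New()

        // Every line the stdlib logger writes becomes one logrus entry at Warn level.
        w := logger.WriterLevel(logrus.WarnLevel)
        defer w.Close()

        stdlog := log.New(w, "", 0)
        stdlog.Println("legacy code still using the standard logger")
    }
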
diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE
deleted file mode 100644
index e06d208..0000000
--- a/vendor/github.com/docker/distribution/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/vendor/github.com/docker/distribution/digest/digest.go b/vendor/github.com/docker/distribution/digest/digest.go
deleted file mode 100644
index 31d821b..0000000
--- a/vendor/github.com/docker/distribution/digest/digest.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package digest
-
-import (
- "fmt"
- "hash"
- "io"
- "regexp"
- "strings"
-)
-
-const (
- // DigestSha256EmptyTar is the canonical sha256 digest of empty data
- DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
-)
-
-// Digest allows simple protection of hex formatted digest strings, prefixed
-// by their algorithm. Strings of type Digest have some guarantee of being in
-// the correct format and it provides quick access to the components of a
-// digest string.
-//
-// The following is an example of the contents of Digest types:
-//
-// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
-//
-// This allows to abstract the digest behind this type and work only in those
-// terms.
-type Digest string
-
-// NewDigest returns a Digest from alg and a hash.Hash object.
-func NewDigest(alg Algorithm, h hash.Hash) Digest {
- return NewDigestFromBytes(alg, h.Sum(nil))
-}
-
-// NewDigestFromBytes returns a new digest from the byte contents of p.
-// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
-// functions. This is also useful for rebuilding digests from binary
-// serializations.
-func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
- return Digest(fmt.Sprintf("%s:%x", alg, p))
-}
-
-// NewDigestFromHex returns a Digest from alg and a the hex encoded digest.
-func NewDigestFromHex(alg, hex string) Digest {
- return Digest(fmt.Sprintf("%s:%s", alg, hex))
-}
-
-// DigestRegexp matches valid digest types.
-var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
-
-// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
-var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
-
-var (
- // ErrDigestInvalidFormat returned when digest format invalid.
- ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
-
- // ErrDigestInvalidLength returned when digest has invalid length.
- ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
-
- // ErrDigestUnsupported returned when the digest algorithm is unsupported.
- ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
-)
-
-// ParseDigest parses s and returns the validated digest object. An error will
-// be returned if the format is invalid.
-func ParseDigest(s string) (Digest, error) {
- d := Digest(s)
-
- return d, d.Validate()
-}
-
-// FromReader returns the most valid digest for the underlying content using
-// the canonical digest algorithm.
-func FromReader(rd io.Reader) (Digest, error) {
- return Canonical.FromReader(rd)
-}
-
-// FromBytes digests the input and returns a Digest.
-func FromBytes(p []byte) Digest {
- return Canonical.FromBytes(p)
-}
-
-// Validate checks that the contents of d is a valid digest, returning an
-// error if not.
-func (d Digest) Validate() error {
- s := string(d)
-
- if !DigestRegexpAnchored.MatchString(s) {
- return ErrDigestInvalidFormat
- }
-
- i := strings.Index(s, ":")
- if i < 0 {
- return ErrDigestInvalidFormat
- }
-
- // case: "sha256:" with no hex.
- if i+1 == len(s) {
- return ErrDigestInvalidFormat
- }
-
- switch algorithm := Algorithm(s[:i]); algorithm {
- case SHA256, SHA384, SHA512:
- if algorithm.Size()*2 != len(s[i+1:]) {
- return ErrDigestInvalidLength
- }
- break
- default:
- return ErrDigestUnsupported
- }
-
- return nil
-}
-
-// Algorithm returns the algorithm portion of the digest. This will panic if
-// the underlying digest is not in a valid format.
-func (d Digest) Algorithm() Algorithm {
- return Algorithm(d[:d.sepIndex()])
-}
-
-// Hex returns the hex digest portion of the digest. This will panic if the
-// underlying digest is not in a valid format.
-func (d Digest) Hex() string {
- return string(d[d.sepIndex()+1:])
-}
-
-func (d Digest) String() string {
- return string(d)
-}
-
-func (d Digest) sepIndex() int {
- i := strings.Index(string(d), ":")
-
- if i < 0 {
- panic("could not find ':' in digest: " + d)
- }
-
- return i
-}
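
A Digest is just a validated "algorithm:hex" string, so the usual round trip is to compute one with FromBytes or FromReader and re-check the string form later with ParseDigest. A minimal sketch against the API above (import path as in this file's header):

    package main

    import (
        "fmt"
        "strings"

        "github.com/docker/distribution/digest"
    )

    func main() {
        d := digest.FromBytes([]byte("hello world")) // canonical algorithm, i.e. sha256
        fmt.Println(d.Algorithm(), d.Hex())

        // ParseDigest re-validates the string form.
        if _, err := digest.ParseDigest(d.String()); err != nil {
            fmt.Println("unexpected:", err)
            return
        }

        // FromReader streams instead of buffering.
        d2, err := digest.FromReader(strings.NewReader("hello world"))
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(d == d2) // true: same bytes, same canonical digest
    }
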
diff --git a/vendor/github.com/docker/distribution/digest/digester.go b/vendor/github.com/docker/distribution/digest/digester.go
deleted file mode 100644
index f3105a4..0000000
--- a/vendor/github.com/docker/distribution/digest/digester.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package digest
-
-import (
- "crypto"
- "fmt"
- "hash"
- "io"
-)
-
-// Algorithm identifies and implementation of a digester by an identifier.
-// Note the that this defines both the hash algorithm used and the string
-// encoding.
-type Algorithm string
-
-// supported digest types
-const (
- SHA256 Algorithm = "sha256" // sha256 with hex encoding
- SHA384 Algorithm = "sha384" // sha384 with hex encoding
- SHA512 Algorithm = "sha512" // sha512 with hex encoding
-
- // Canonical is the primary digest algorithm used with the distribution
- // project. Other digests may be used but this one is the primary storage
- // digest.
- Canonical = SHA256
-)
-
-var (
- // TODO(stevvooe): Follow the pattern of the standard crypto package for
- // registration of digests. Effectively, we are a registerable set and
- // common symbol access.
-
- // algorithms maps values to hash.Hash implementations. Other algorithms
- // may be available but they cannot be calculated by the digest package.
- algorithms = map[Algorithm]crypto.Hash{
- SHA256: crypto.SHA256,
- SHA384: crypto.SHA384,
- SHA512: crypto.SHA512,
- }
-)
-
-// Available returns true if the digest type is available for use. If this
-// returns false, New and Hash will return nil.
-func (a Algorithm) Available() bool {
- h, ok := algorithms[a]
- if !ok {
- return false
- }
-
- // check availability of the hash, as well
- return h.Available()
-}
-
-func (a Algorithm) String() string {
- return string(a)
-}
-
-// Size returns number of bytes returned by the hash.
-func (a Algorithm) Size() int {
- h, ok := algorithms[a]
- if !ok {
- return 0
- }
- return h.Size()
-}
-
-// Set implemented to allow use of Algorithm as a command line flag.
-func (a *Algorithm) Set(value string) error {
- if value == "" {
- *a = Canonical
- } else {
- // just do a type conversion, support is queried with Available.
- *a = Algorithm(value)
- }
-
- return nil
-}
-
-// New returns a new digester for the specified algorithm. If the algorithm
-// does not have a digester implementation, nil will be returned. This can be
-// checked by calling Available before calling New.
-func (a Algorithm) New() Digester {
- return &digester{
- alg: a,
- hash: a.Hash(),
- }
-}
-
-// Hash returns a new hash as used by the algorithm. If not available, the
-// method will panic. Check Algorithm.Available() before calling.
-func (a Algorithm) Hash() hash.Hash {
- if !a.Available() {
- // NOTE(stevvooe): A missing hash is usually a programming error that
- // must be resolved at compile time. We don't import in the digest
- // package to allow users to choose their hash implementation (such as
- // when using stevvooe/resumable or a hardware accelerated package).
- //
- // Applications that may want to resolve the hash at runtime should
- // call Algorithm.Available before call Algorithm.Hash().
- panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
- }
-
- return algorithms[a].New()
-}
-
-// FromReader returns the digest of the reader using the algorithm.
-func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
- digester := a.New()
-
- if _, err := io.Copy(digester.Hash(), rd); err != nil {
- return "", err
- }
-
- return digester.Digest(), nil
-}
-
-// FromBytes digests the input and returns a Digest.
-func (a Algorithm) FromBytes(p []byte) Digest {
- digester := a.New()
-
- if _, err := digester.Hash().Write(p); err != nil {
- // Writes to a Hash should never fail. None of the existing
- // hash implementations in the stdlib or hashes vendored
- // here can return errors from Write. Having a panic in this
- // condition instead of having FromBytes return an error value
- // avoids unnecessary error handling paths in all callers.
- panic("write to hash function returned error: " + err.Error())
- }
-
- return digester.Digest()
-}
-
-// TODO(stevvooe): Allow resolution of verifiers using the digest type and
-// this registration system.
-
-// Digester calculates the digest of written data. Writes should go directly
-// to the return value of Hash, while calling Digest will return the current
-// value of the digest.
-type Digester interface {
- Hash() hash.Hash // provides direct access to underlying hash instance.
- Digest() Digest
-}
-
-// digester provides a simple digester definition that embeds a hasher.
-type digester struct {
- alg Algorithm
- hash hash.Hash
-}
-
-func (d *digester) Hash() hash.Hash {
- return d.hash
-}
-
-func (d *digester) Digest() Digest {
- return NewDigest(d.alg, d.hash)
-}
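
Digester is the incremental counterpart to FromBytes/FromReader: write into Hash(), then read the result with Digest(). A sketch using the Canonical algorithm declared above:

    package main

    import (
        "fmt"
        "io"
        "strings"

        "github.com/docker/distribution/digest"
    )

    func main() {
        digester := digest.Canonical.New()
        if _, err := io.Copy(digester.Hash(), strings.NewReader("some layer data")); err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(digester.Digest()) // sha256:...
    }
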
diff --git a/vendor/github.com/docker/distribution/digest/doc.go b/vendor/github.com/docker/distribution/digest/doc.go
deleted file mode 100644
index f64b0db..0000000
--- a/vendor/github.com/docker/distribution/digest/doc.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Package digest provides a generalized type to opaquely represent message
-// digests and their operations within the registry. The Digest type is
-// designed to serve as a flexible identifier in a content-addressable system.
-// More importantly, it provides tools and wrappers to work with
-// hash.Hash-based digests with little effort.
-//
-// Basics
-//
-// The format of a digest is simply a string with two parts, dubbed the
-// "algorithm" and the "digest", separated by a colon:
-//
-// <algorithm>:<digest>
-//
-// An example of a sha256 digest representation follows:
-//
-// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
-//
-// In this case, the string "sha256" is the algorithm and the hex bytes are
-// the "digest".
-//
-// Because the Digest type is simply a string, once a valid Digest is
-// obtained, comparisons are cheap, quick and simple to express with the
-// standard equality operator.
-//
-// Verification
-//
-// The main benefit of using the Digest type is simple verification against a
-// given digest. The Verifier interface, modeled after the stdlib hash.Hash
-// interface, provides a common write sink for digest verification. After
-// writing is complete, calling the Verifier.Verified method will indicate
-// whether or not the stream of bytes matches the target digest.
-//
-// Missing Features
-//
-// In addition to the above, we intend to add the following features to this
-// package:
-//
-// 1. A Digester type that supports write sink digest calculation.
-//
-// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
-//
-package digest
diff --git a/vendor/github.com/docker/distribution/digest/set.go b/vendor/github.com/docker/distribution/digest/set.go
deleted file mode 100644
index 4b9313c..0000000
--- a/vendor/github.com/docker/distribution/digest/set.go
+++ /dev/null
@@ -1,245 +0,0 @@
-package digest
-
-import (
- "errors"
- "sort"
- "strings"
- "sync"
-)
-
-var (
- // ErrDigestNotFound is used when a matching digest
- // could not be found in a set.
- ErrDigestNotFound = errors.New("digest not found")
-
- // ErrDigestAmbiguous is used when multiple digests
- // are found in a set. None of the matching digests
- // should be considered valid matches.
- ErrDigestAmbiguous = errors.New("ambiguous digest string")
-)
-
-// Set is used to hold a unique set of digests which
-// may be easily referenced by easily referenced by a string
-// representation of the digest as well as short representation.
-// The uniqueness of the short representation is based on other
-// digests in the set. If digests are omitted from this set,
-// collisions in a larger set may not be detected, therefore it
-// is important to always do short representation lookups on
-// the complete set of digests. To mitigate collisions, an
-// appropriately long short code should be used.
-type Set struct {
- mutex sync.RWMutex
- entries digestEntries
-}
-
-// NewSet creates an empty set of digests
-// which may have digests added.
-func NewSet() *Set {
- return &Set{
- entries: digestEntries{},
- }
-}
-
-// checkShortMatch checks whether two digests match as either whole
-// values or short values. This function does not test equality,
-// rather whether the second value could match against the first
-// value.
-func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
- if len(hex) == len(shortHex) {
- if hex != shortHex {
- return false
- }
- if len(shortAlg) > 0 && string(alg) != shortAlg {
- return false
- }
- } else if !strings.HasPrefix(hex, shortHex) {
- return false
- } else if len(shortAlg) > 0 && string(alg) != shortAlg {
- return false
- }
- return true
-}
-
-// Lookup looks for a digest matching the given string representation.
-// If no digests could be found ErrDigestNotFound will be returned
-// with an empty digest value. If multiple matches are found
-// ErrDigestAmbiguous will be returned with an empty digest value.
-func (dst *Set) Lookup(d string) (Digest, error) {
- dst.mutex.RLock()
- defer dst.mutex.RUnlock()
- if len(dst.entries) == 0 {
- return "", ErrDigestNotFound
- }
- var (
- searchFunc func(int) bool
- alg Algorithm
- hex string
- )
- dgst, err := ParseDigest(d)
- if err == ErrDigestInvalidFormat {
- hex = d
- searchFunc = func(i int) bool {
- return dst.entries[i].val >= d
- }
- } else {
- hex = dgst.Hex()
- alg = dgst.Algorithm()
- searchFunc = func(i int) bool {
- if dst.entries[i].val == hex {
- return dst.entries[i].alg >= alg
- }
- return dst.entries[i].val >= hex
- }
- }
- idx := sort.Search(len(dst.entries), searchFunc)
- if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
- return "", ErrDigestNotFound
- }
- if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
- return dst.entries[idx].digest, nil
- }
- if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
- return "", ErrDigestAmbiguous
- }
-
- return dst.entries[idx].digest, nil
-}
-
-// Add adds the given digest to the set. An error will be returned
-// if the given digest is invalid. If the digest already exists in the
-// set, this operation will be a no-op.
-func (dst *Set) Add(d Digest) error {
- if err := d.Validate(); err != nil {
- return err
- }
- dst.mutex.Lock()
- defer dst.mutex.Unlock()
- entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
- searchFunc := func(i int) bool {
- if dst.entries[i].val == entry.val {
- return dst.entries[i].alg >= entry.alg
- }
- return dst.entries[i].val >= entry.val
- }
- idx := sort.Search(len(dst.entries), searchFunc)
- if idx == len(dst.entries) {
- dst.entries = append(dst.entries, entry)
- return nil
- } else if dst.entries[idx].digest == d {
- return nil
- }
-
- entries := append(dst.entries, nil)
- copy(entries[idx+1:], entries[idx:len(entries)-1])
- entries[idx] = entry
- dst.entries = entries
- return nil
-}
-
-// Remove removes the given digest from the set. An err will be
-// returned if the given digest is invalid. If the digest does
-// not exist in the set, this operation will be a no-op.
-func (dst *Set) Remove(d Digest) error {
- if err := d.Validate(); err != nil {
- return err
- }
- dst.mutex.Lock()
- defer dst.mutex.Unlock()
- entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
- searchFunc := func(i int) bool {
- if dst.entries[i].val == entry.val {
- return dst.entries[i].alg >= entry.alg
- }
- return dst.entries[i].val >= entry.val
- }
- idx := sort.Search(len(dst.entries), searchFunc)
- // Not found if idx is after or value at idx is not digest
- if idx == len(dst.entries) || dst.entries[idx].digest != d {
- return nil
- }
-
- entries := dst.entries
- copy(entries[idx:], entries[idx+1:])
- entries = entries[:len(entries)-1]
- dst.entries = entries
-
- return nil
-}
-
-// All returns all the digests in the set
-func (dst *Set) All() []Digest {
- dst.mutex.RLock()
- defer dst.mutex.RUnlock()
- retValues := make([]Digest, len(dst.entries))
- for i := range dst.entries {
- retValues[i] = dst.entries[i].digest
- }
-
- return retValues
-}
-
-// ShortCodeTable returns a map of Digest to unique short codes. The
-// length represents the minimum value, the maximum length may be the
-// entire value of digest if uniqueness cannot be achieved without the
-// full value. This function will attempt to make short codes as short
-// as possible to be unique.
-func ShortCodeTable(dst *Set, length int) map[Digest]string {
- dst.mutex.RLock()
- defer dst.mutex.RUnlock()
- m := make(map[Digest]string, len(dst.entries))
- l := length
- resetIdx := 0
- for i := 0; i < len(dst.entries); i++ {
- var short string
- extended := true
- for extended {
- extended = false
- if len(dst.entries[i].val) <= l {
- short = dst.entries[i].digest.String()
- } else {
- short = dst.entries[i].val[:l]
- for j := i + 1; j < len(dst.entries); j++ {
- if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
- if j > resetIdx {
- resetIdx = j
- }
- extended = true
- } else {
- break
- }
- }
- if extended {
- l++
- }
- }
- }
- m[dst.entries[i].digest] = short
- if i >= resetIdx {
- l = length
- }
- }
- return m
-}
-
-type digestEntry struct {
- alg Algorithm
- val string
- digest Digest
-}
-
-type digestEntries []*digestEntry
-
-func (d digestEntries) Len() int {
- return len(d)
-}
-
-func (d digestEntries) Less(i, j int) bool {
- if d[i].val != d[j].val {
- return d[i].val < d[j].val
- }
- return d[i].alg < d[j].alg
-}
-
-func (d digestEntries) Swap(i, j int) {
- d[i], d[j] = d[j], d[i]
-}
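
Set resolves abbreviated digests the way short image IDs are resolved: Add full digests, then Lookup with either a full string or a unique hex prefix. A sketch with two arbitrary entries:

    package main

    import (
        "fmt"

        "github.com/docker/distribution/digest"
    )

    func main() {
        s := digest.NewSet()
        a := digest.FromBytes([]byte("layer-a"))
        b := digest.FromBytes([]byte("layer-b"))
        for _, d := range []digest.Digest{a, b} {
            if err := s.Add(d); err != nil {
                fmt.Println(err)
                return
            }
        }

        // A unique hex prefix is enough; unknown or ambiguous prefixes return
        // ErrDigestNotFound / ErrDigestAmbiguous.
        got, err := s.Lookup(a.Hex()[:12])
        fmt.Println(got == a, err)

        // Shortest unique prefixes (at least 12 characters) for display.
        for d, short := range digest.ShortCodeTable(s, 12) {
            fmt.Println(short, "->", d)
        }
    }
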
diff --git a/vendor/github.com/docker/distribution/digest/verifiers.go b/vendor/github.com/docker/distribution/digest/verifiers.go
deleted file mode 100644
index 9af3be1..0000000
--- a/vendor/github.com/docker/distribution/digest/verifiers.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package digest
-
-import (
- "hash"
- "io"
-)
-
-// Verifier presents a general verification interface to be used with message
-// digests and other byte stream verifications. Users instantiate a Verifier
-// from one of the various methods, write the data under test to it then check
-// the result with the Verified method.
-type Verifier interface {
- io.Writer
-
- // Verified will return true if the content written to Verifier matches
- // the digest.
- Verified() bool
-}
-
-// NewDigestVerifier returns a verifier that compares the written bytes
-// against a passed in digest.
-func NewDigestVerifier(d Digest) (Verifier, error) {
- if err := d.Validate(); err != nil {
- return nil, err
- }
-
- return hashVerifier{
- hash: d.Algorithm().Hash(),
- digest: d,
- }, nil
-}
-
-type hashVerifier struct {
- digest Digest
- hash hash.Hash
-}
-
-func (hv hashVerifier) Write(p []byte) (n int, err error) {
- return hv.hash.Write(p)
-}
-
-func (hv hashVerifier) Verified() bool {
- return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
-}
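
The verification flow is exactly as the comment above describes: build a Verifier from the expected digest, write the data under test into it, then ask Verified. A sketch:

    package main

    import (
        "bytes"
        "fmt"
        "io"

        "github.com/docker/distribution/digest"
    )

    func main() {
        payload := []byte("blob contents")
        expected := digest.FromBytes(payload)

        v, err := digest.NewDigestVerifier(expected)
        if err != nil {
            fmt.Println(err)
            return
        }

        if _, err := io.Copy(v, bytes.NewReader(payload)); err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(v.Verified()) // true only if the stream matched the digest
    }
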
diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go
deleted file mode 100644
index dd7ee0e..0000000
--- a/vendor/github.com/docker/distribution/reference/helpers.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package reference
-
-// IsNameOnly returns true if reference only contains a repo name.
-func IsNameOnly(ref Named) bool {
- if _, ok := ref.(NamedTagged); ok {
- return false
- }
- if _, ok := ref.(Canonical); ok {
- return false
- }
- return true
-}
diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go
deleted file mode 100644
index b19a34e..0000000
--- a/vendor/github.com/docker/distribution/reference/normalize.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package reference
-
-var (
- defaultTag = "latest"
-)
-
-// EnsureTagged adds the default tag "latest" to a reference if it only has
-// a repo name.
-func EnsureTagged(ref Named) NamedTagged {
- namedTagged, ok := ref.(NamedTagged)
- if !ok {
- namedTagged, err := WithTag(ref, defaultTag)
- if err != nil {
- // Default tag must be valid, to create a NamedTagged
- // type with non-validated input the WithTag function
- // should be used instead
- panic(err)
- }
- return namedTagged
- }
- return namedTagged
-}
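
IsNameOnly and EnsureTagged are small conveniences: detect a bare repository name and default its tag to "latest". A sketch (the repository name is illustrative):

    package main

    import (
        "fmt"

        "github.com/docker/distribution/reference"
    )

    func main() {
        ref, err := reference.ParseNamed("library/alpine")
        if err != nil {
            fmt.Println(err)
            return
        }

        fmt.Println(reference.IsNameOnly(ref)) // true: no tag, no digest

        tagged := reference.EnsureTagged(ref)
        fmt.Println(tagged.Name(), tagged.Tag()) // library/alpine latest
    }
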
diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go
deleted file mode 100644
index 0278662..0000000
--- a/vendor/github.com/docker/distribution/reference/reference.go
+++ /dev/null
@@ -1,370 +0,0 @@
-// Package reference provides a general type to represent any way of referencing images within the registry.
-// Its main purpose is to abstract tags and digests (content-addressable hash).
-//
-// Grammar
-//
-// reference := name [ ":" tag ] [ "@" digest ]
-// name := [hostname '/'] component ['/' component]*
-// hostname := hostcomponent ['.' hostcomponent]* [':' port-number]
-// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
-// port-number := /[0-9]+/
-// component := alpha-numeric [separator alpha-numeric]*
-// alpha-numeric := /[a-z0-9]+/
-// separator := /[_.]|__|[-]*/
-//
-// tag := /[\w][\w.-]{0,127}/
-//
-// digest := digest-algorithm ":" digest-hex
-// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
-// digest-algorithm-separator := /[+.-_]/
-// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
-// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
-package reference
-
-import (
- "errors"
- "fmt"
- "path"
- "strings"
-
- "github.com/docker/distribution/digest"
-)
-
-const (
- // NameTotalLengthMax is the maximum total number of characters in a repository name.
- NameTotalLengthMax = 255
-)
-
-var (
- // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
- ErrReferenceInvalidFormat = errors.New("invalid reference format")
-
- // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
- ErrTagInvalidFormat = errors.New("invalid tag format")
-
- // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
- ErrDigestInvalidFormat = errors.New("invalid digest format")
-
- // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
- ErrNameContainsUppercase = errors.New("repository name must be lowercase")
-
- // ErrNameEmpty is returned for empty, invalid repository names.
- ErrNameEmpty = errors.New("repository name must have at least one component")
-
- // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
- ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
-)
-
-// Reference is an opaque object reference identifier that may include
-// modifiers such as a hostname, name, tag, and digest.
-type Reference interface {
- // String returns the full reference
- String() string
-}
-
-// Field provides a wrapper type for resolving correct reference types when
-// working with encoding.
-type Field struct {
- reference Reference
-}
-
-// AsField wraps a reference in a Field for encoding.
-func AsField(reference Reference) Field {
- return Field{reference}
-}
-
-// Reference unwraps the reference type from the field to
-// return the Reference object. This object should be
-// of the appropriate type to further check for different
-// reference types.
-func (f Field) Reference() Reference {
- return f.reference
-}
-
-// MarshalText serializes the field to byte text which
-// is the string of the reference.
-func (f Field) MarshalText() (p []byte, err error) {
- return []byte(f.reference.String()), nil
-}
-
-// UnmarshalText parses text bytes by invoking the
-// reference parser to ensure the appropriately
-// typed reference object is wrapped by field.
-func (f *Field) UnmarshalText(p []byte) error {
- r, err := Parse(string(p))
- if err != nil {
- return err
- }
-
- f.reference = r
- return nil
-}
-
-// Named is an object with a full name
-type Named interface {
- Reference
- Name() string
-}
-
-// Tagged is an object which has a tag
-type Tagged interface {
- Reference
- Tag() string
-}
-
-// NamedTagged is an object including a name and tag.
-type NamedTagged interface {
- Named
- Tag() string
-}
-
-// Digested is an object which has a digest
-// in which it can be referenced by
-type Digested interface {
- Reference
- Digest() digest.Digest
-}
-
-// Canonical reference is an object with a fully unique
-// name including a name with hostname and digest
-type Canonical interface {
- Named
- Digest() digest.Digest
-}
-
-// SplitHostname splits a named reference into a
-// hostname and name string. If no valid hostname is
-// found, the hostname is empty and the full value
-// is returned as name
-func SplitHostname(named Named) (string, string) {
- name := named.Name()
- match := anchoredNameRegexp.FindStringSubmatch(name)
- if len(match) != 3 {
- return "", name
- }
- return match[1], match[2]
-}
-
-// Parse parses s and returns a syntactically valid Reference.
-// If an error was encountered it is returned, along with a nil Reference.
-// NOTE: Parse will not handle short digests.
-func Parse(s string) (Reference, error) {
- matches := ReferenceRegexp.FindStringSubmatch(s)
- if matches == nil {
- if s == "" {
- return nil, ErrNameEmpty
- }
- if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
- return nil, ErrNameContainsUppercase
- }
- return nil, ErrReferenceInvalidFormat
- }
-
- if len(matches[1]) > NameTotalLengthMax {
- return nil, ErrNameTooLong
- }
-
- ref := reference{
- name: matches[1],
- tag: matches[2],
- }
- if matches[3] != "" {
- var err error
- ref.digest, err = digest.ParseDigest(matches[3])
- if err != nil {
- return nil, err
- }
- }
-
- r := getBestReferenceType(ref)
- if r == nil {
- return nil, ErrNameEmpty
- }
-
- return r, nil
-}
-
-// ParseNamed parses s and returns a syntactically valid reference implementing
-// the Named interface. The reference must have a name, otherwise an error is
-// returned.
-// If an error was encountered it is returned, along with a nil Reference.
-// NOTE: ParseNamed will not handle short digests.
-func ParseNamed(s string) (Named, error) {
- ref, err := Parse(s)
- if err != nil {
- return nil, err
- }
- named, isNamed := ref.(Named)
- if !isNamed {
- return nil, fmt.Errorf("reference %s has no name", ref.String())
- }
- return named, nil
-}
-
-// WithName returns a named object representing the given string. If the input
-// is invalid ErrReferenceInvalidFormat will be returned.
-func WithName(name string) (Named, error) {
- if len(name) > NameTotalLengthMax {
- return nil, ErrNameTooLong
- }
- if !anchoredNameRegexp.MatchString(name) {
- return nil, ErrReferenceInvalidFormat
- }
- return repository(name), nil
-}
-
-// WithTag combines the name from "name" and the tag from "tag" to form a
-// reference incorporating both the name and the tag.
-func WithTag(name Named, tag string) (NamedTagged, error) {
- if !anchoredTagRegexp.MatchString(tag) {
- return nil, ErrTagInvalidFormat
- }
- if canonical, ok := name.(Canonical); ok {
- return reference{
- name: name.Name(),
- tag: tag,
- digest: canonical.Digest(),
- }, nil
- }
- return taggedReference{
- name: name.Name(),
- tag: tag,
- }, nil
-}
-
-// WithDigest combines the name from "name" and the digest from "digest" to form
-// a reference incorporating both the name and the digest.
-func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
- if !anchoredDigestRegexp.MatchString(digest.String()) {
- return nil, ErrDigestInvalidFormat
- }
- if tagged, ok := name.(Tagged); ok {
- return reference{
- name: name.Name(),
- tag: tagged.Tag(),
- digest: digest,
- }, nil
- }
- return canonicalReference{
- name: name.Name(),
- digest: digest,
- }, nil
-}
-
-// Match reports whether ref matches the specified pattern.
-// See https://godoc.org/path#Match for supported patterns.
-func Match(pattern string, ref Reference) (bool, error) {
- matched, err := path.Match(pattern, ref.String())
- if namedRef, isNamed := ref.(Named); isNamed && !matched {
- matched, _ = path.Match(pattern, namedRef.Name())
- }
- return matched, err
-}
-
-// TrimNamed removes any tag or digest from the named reference.
-func TrimNamed(ref Named) Named {
- return repository(ref.Name())
-}
-
-func getBestReferenceType(ref reference) Reference {
- if ref.name == "" {
- // Allow digest only references
- if ref.digest != "" {
- return digestReference(ref.digest)
- }
- return nil
- }
- if ref.tag == "" {
- if ref.digest != "" {
- return canonicalReference{
- name: ref.name,
- digest: ref.digest,
- }
- }
- return repository(ref.name)
- }
- if ref.digest == "" {
- return taggedReference{
- name: ref.name,
- tag: ref.tag,
- }
- }
-
- return ref
-}
-
-type reference struct {
- name string
- tag string
- digest digest.Digest
-}
-
-func (r reference) String() string {
- return r.name + ":" + r.tag + "@" + r.digest.String()
-}
-
-func (r reference) Name() string {
- return r.name
-}
-
-func (r reference) Tag() string {
- return r.tag
-}
-
-func (r reference) Digest() digest.Digest {
- return r.digest
-}
-
-type repository string
-
-func (r repository) String() string {
- return string(r)
-}
-
-func (r repository) Name() string {
- return string(r)
-}
-
-type digestReference digest.Digest
-
-func (d digestReference) String() string {
- return d.String()
-}
-
-func (d digestReference) Digest() digest.Digest {
- return digest.Digest(d)
-}
-
-type taggedReference struct {
- name string
- tag string
-}
-
-func (t taggedReference) String() string {
- return t.name + ":" + t.tag
-}
-
-func (t taggedReference) Name() string {
- return t.name
-}
-
-func (t taggedReference) Tag() string {
- return t.tag
-}
-
-type canonicalReference struct {
- name string
- digest digest.Digest
-}
-
-func (c canonicalReference) String() string {
- return c.name + "@" + c.digest.String()
-}
-
-func (c canonicalReference) Name() string {
- return c.name
-}
-
-func (c canonicalReference) Digest() digest.Digest {
- return c.digest
-}
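
Parse returns the most specific type the string supports (Named, NamedTagged, Canonical), and SplitHostname, WithTag and WithDigest move between them. A sketch over the grammar documented at the top of this file; the image name is illustrative:

    package main

    import (
        "fmt"

        "github.com/docker/distribution/digest"
        "github.com/docker/distribution/reference"
    )

    func main() {
        named, err := reference.ParseNamed("registry.example.com:5000/team/app:v1.2")
        if err != nil {
            fmt.Println(err)
            return
        }

        host, remainder := reference.SplitHostname(named)
        fmt.Println(host, remainder) // registry.example.com:5000 team/app

        if tagged, ok := named.(reference.NamedTagged); ok {
            fmt.Println("tag:", tagged.Tag()) // v1.2
        }

        // Pin the same name to a digest instead of a tag.
        d := digest.FromBytes([]byte("manifest bytes"))
        canonical, err := reference.WithDigest(reference.TrimNamed(named), d)
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(canonical.String()) // registry.example.com:5000/team/app@sha256:...
    }
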
diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go
deleted file mode 100644
index 9a7d366..0000000
--- a/vendor/github.com/docker/distribution/reference/regexp.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package reference
-
-import "regexp"
-
-var (
- // alphaNumericRegexp defines the alpha numeric atom, typically a
- // component of names. This only allows lower case characters and digits.
- alphaNumericRegexp = match(`[a-z0-9]+`)
-
- // separatorRegexp defines the separators allowed to be embedded in name
- // components. This allow one period, one or two underscore and multiple
- // dashes.
- separatorRegexp = match(`(?:[._]|__|[-]*)`)
-
- // nameComponentRegexp restricts registry path component names to start
- // with at least one letter or number, with following parts able to be
- // separated by one period, one or two underscore and multiple dashes.
- nameComponentRegexp = expression(
- alphaNumericRegexp,
- optional(repeated(separatorRegexp, alphaNumericRegexp)))
-
- // hostnameComponentRegexp restricts the registry hostname component of a
- // repository name to start with a component as defined by hostnameRegexp
- // and followed by an optional port.
- hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
-
- // hostnameRegexp defines the structure of potential hostname components
- // that may be part of image names. This is purposely a subset of what is
- // allowed by DNS to ensure backwards compatibility with Docker image
- // names.
- hostnameRegexp = expression(
- hostnameComponentRegexp,
- optional(repeated(literal(`.`), hostnameComponentRegexp)),
- optional(literal(`:`), match(`[0-9]+`)))
-
- // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
- TagRegexp = match(`[\w][\w.-]{0,127}`)
-
- // anchoredTagRegexp matches valid tag names, anchored at the start and
- // end of the matched string.
- anchoredTagRegexp = anchored(TagRegexp)
-
- // DigestRegexp matches valid digests.
- DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
-
- // anchoredDigestRegexp matches valid digests, anchored at the start and
- // end of the matched string.
- anchoredDigestRegexp = anchored(DigestRegexp)
-
- // NameRegexp is the format for the name component of references. The
- // regexp has capturing groups for the hostname and name part omitting
- // the separating forward slash from either.
- NameRegexp = expression(
- optional(hostnameRegexp, literal(`/`)),
- nameComponentRegexp,
- optional(repeated(literal(`/`), nameComponentRegexp)))
-
- // anchoredNameRegexp is used to parse a name value, capturing the
- // hostname and trailing components.
- anchoredNameRegexp = anchored(
- optional(capture(hostnameRegexp), literal(`/`)),
- capture(nameComponentRegexp,
- optional(repeated(literal(`/`), nameComponentRegexp))))
-
- // ReferenceRegexp is the full supported format of a reference. The regexp
- // is anchored and has capturing groups for name, tag, and digest
- // components.
- ReferenceRegexp = anchored(capture(NameRegexp),
- optional(literal(":"), capture(TagRegexp)),
- optional(literal("@"), capture(DigestRegexp)))
-)
-
-// match compiles the string to a regular expression.
-var match = regexp.MustCompile
-
-// literal compiles s into a literal regular expression, escaping any regexp
-// reserved characters.
-func literal(s string) *regexp.Regexp {
- re := match(regexp.QuoteMeta(s))
-
- if _, complete := re.LiteralPrefix(); !complete {
- panic("must be a literal")
- }
-
- return re
-}
-
-// expression defines a full expression, where each regular expression must
-// follow the previous.
-func expression(res ...*regexp.Regexp) *regexp.Regexp {
- var s string
- for _, re := range res {
- s += re.String()
- }
-
- return match(s)
-}
-
-// optional wraps the expression in a non-capturing group and makes the
-// production optional.
-func optional(res ...*regexp.Regexp) *regexp.Regexp {
- return match(group(expression(res...)).String() + `?`)
-}
-
-// repeated wraps the regexp in a non-capturing group to get one or more
-// matches.
-func repeated(res ...*regexp.Regexp) *regexp.Regexp {
- return match(group(expression(res...)).String() + `+`)
-}
-
-// group wraps the regexp in a non-capturing group.
-func group(res ...*regexp.Regexp) *regexp.Regexp {
- return match(`(?:` + expression(res...).String() + `)`)
-}
-
-// capture wraps the expression in a capturing group.
-func capture(res ...*regexp.Regexp) *regexp.Regexp {
- return match(`(` + expression(res...).String() + `)`)
-}
-
-// anchored anchors the regular expression by adding start and end delimiters.
-func anchored(res ...*regexp.Regexp) *regexp.Regexp {
- return match(`^` + expression(res...).String() + `$`)
-}
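
ReferenceRegexp is anchored and captures name, tag and digest, so it can be used directly for quick splitting without going through Parse. A tiny sketch:

    package main

    import (
        "fmt"

        "github.com/docker/distribution/reference"
    )

    func main() {
        m := reference.ReferenceRegexp.FindStringSubmatch("quay.io/coreos/etcd:v3.0.0")
        if m == nil {
            fmt.Println("no match")
            return
        }
        // m[0] is the full match; m[1], m[2], m[3] are name, tag and digest.
        fmt.Printf("name=%q tag=%q digest=%q\n", m[1], m[2], m[3])
    }
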
diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE
deleted file mode 100644
index 8f3fee6..0000000
--- a/vendor/github.com/docker/docker/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- https://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright 2013-2016 Docker, Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE
deleted file mode 100644
index 8a37c1c..0000000
--- a/vendor/github.com/docker/docker/NOTICE
+++ /dev/null
@@ -1,19 +0,0 @@
-Docker
-Copyright 2012-2016 Docker, Inc.
-
-This product includes software developed at Docker, Inc. (https://www.docker.com).
-
-This product contains software (https://github.com/kr/pty) developed
-by Keith Rarick, licensed under the MIT License.
-
-The following is courtesy of our legal counsel:
-
-
-Use and transfer of Docker may be subject to certain restrictions by the
-United States and other governments.
-It is your responsibility to ensure that your use and/or transfer does not
-violate applicable laws.
-
-For more information, please see https://www.bis.doc.gov
-
-See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
deleted file mode 100644
index be20765..0000000
--- a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package stdcopy
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "sync"
-)
-
-// StdType is the type of standard stream
-// a writer can multiplex to.
-type StdType byte
-
-const (
- // Stdin represents standard input stream type.
- Stdin StdType = iota
- // Stdout represents standard output stream type.
- Stdout
- // Stderr represents standard error stream type.
- Stderr
-
- stdWriterPrefixLen = 8
- stdWriterFdIndex = 0
- stdWriterSizeIndex = 4
-
- startingBufLen = 32*1024 + stdWriterPrefixLen + 1
-)
-
-var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }}
-
-// stdWriter is a wrapper of io.Writer with extra customized info.
-type stdWriter struct {
- io.Writer
- prefix byte
-}
-
-// Write sends the buffer to the underlying writer.
-// It inserts the prefix header before the buffer,
-// so stdcopy.StdCopy knows to which stream each frame belongs.
-// This method makes stdWriter implement io.Writer.
-func (w *stdWriter) Write(p []byte) (n int, err error) {
- if w == nil || w.Writer == nil {
- return 0, errors.New("Writer not instantiated")
- }
- if p == nil {
- return 0, nil
- }
-
- header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
- binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p)))
- buf := bufPool.Get().(*bytes.Buffer)
- buf.Write(header[:])
- buf.Write(p)
-
- n, err = w.Writer.Write(buf.Bytes())
- n -= stdWriterPrefixLen
- if n < 0 {
- n = 0
- }
-
- buf.Reset()
- bufPool.Put(buf)
- return
-}
-
-// NewStdWriter instantiates a new Writer.
-// Everything written to it will be encapsulated using a custom format,
-// and written to the underlying `w` stream.
-// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
-// `t` indicates the id of the stream to encapsulate.
-// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
-func NewStdWriter(w io.Writer, t StdType) io.Writer {
- return &stdWriter{
- Writer: w,
- prefix: byte(t),
- }
-}
-
-// StdCopy is a modified version of io.Copy.
-//
-// StdCopy will demultiplex `src`, assuming that it contains two streams,
-// previously multiplexed together using a StdWriter instance.
-// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
-//
-// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
-// In other words: if `err` is non-nil, it indicates a real underlying error.
-//
-// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
-func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
- var (
- buf = make([]byte, startingBufLen)
- bufLen = len(buf)
- nr, nw int
- er, ew error
- out io.Writer
- frameSize int
- )
-
- for {
- // Make sure we have at least a full header
- for nr < stdWriterPrefixLen {
- var nr2 int
- nr2, er = src.Read(buf[nr:])
- nr += nr2
- if er == io.EOF {
- if nr < stdWriterPrefixLen {
- return written, nil
- }
- break
- }
- if er != nil {
- return 0, er
- }
- }
-
- // Check the first byte to know where to write
- switch StdType(buf[stdWriterFdIndex]) {
- case Stdin:
- fallthrough
- case Stdout:
- // Write on stdout
- out = dstout
- case Stderr:
- // Write on stderr
- out = dsterr
- default:
- return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
- }
-
- // Retrieve the size of the frame
- frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
-
- // Check if the buffer is big enough to read the frame.
- // Extend it if necessary.
- if frameSize+stdWriterPrefixLen > bufLen {
- buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
- bufLen = len(buf)
- }
-
- // While the amount of bytes read is less than the size of the frame + header, we keep reading
- for nr < frameSize+stdWriterPrefixLen {
- var nr2 int
- nr2, er = src.Read(buf[nr:])
- nr += nr2
- if er == io.EOF {
- if nr < frameSize+stdWriterPrefixLen {
- return written, nil
- }
- break
- }
- if er != nil {
- return 0, er
- }
- }
-
- // Write the retrieved frame (without header)
- nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
- if ew != nil {
- return 0, ew
- }
- // If the frame has not been fully written: error
- if nw != frameSize {
- return 0, io.ErrShortWrite
- }
- written += int64(nw)
-
- // Move the rest of the buffer to the beginning
- copy(buf, buf[frameSize+stdWriterPrefixLen:])
- // Move the index
- nr -= frameSize + stdWriterPrefixLen
- }
-}
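For context on the removal above: stdcopy implements the framing Docker uses to multiplex stdout and stderr over one connection (byte 0 of each 8-byte header carries the stream id, bytes 4-7 the big-endian payload length). A minimal sketch of round-tripping through that format, assuming the import path of the file above; the sample strings are illustrative only:

package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Multiplex two logical streams into one buffer using NewStdWriter,
	// which prepends the frame header described above.
	var muxed bytes.Buffer
	outW := stdcopy.NewStdWriter(&muxed, stdcopy.Stdout)
	errW := stdcopy.NewStdWriter(&muxed, stdcopy.Stderr)
	outW.Write([]byte("hello from stdout\n"))
	errW.Write([]byte("hello from stderr\n"))

	// StdCopy demultiplexes the combined stream back into separate writers.
	var stdout, stderr bytes.Buffer
	if _, err := stdcopy.StdCopy(&stdout, &stderr, &muxed); err != nil {
		panic(err)
	}
	fmt.Print(stdout.String())
	fmt.Print(stderr.String())
}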
diff --git a/vendor/github.com/docker/engine-api/LICENSE b/vendor/github.com/docker/engine-api/LICENSE
deleted file mode 100644
index c157bff..0000000
--- a/vendor/github.com/docker/engine-api/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- https://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright 2015-2016 Docker, Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/docker/engine-api/client/checkpoint_create.go b/vendor/github.com/docker/engine-api/client/checkpoint_create.go
deleted file mode 100644
index 23883cc..0000000
--- a/vendor/github.com/docker/engine-api/client/checkpoint_create.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package client
-
-import (
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// CheckpointCreate creates a checkpoint from the given container with the given name
-func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error {
- resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/checkpoint_delete.go b/vendor/github.com/docker/engine-api/client/checkpoint_delete.go
deleted file mode 100644
index a4e9ed0..0000000
--- a/vendor/github.com/docker/engine-api/client/checkpoint_delete.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package client
-
-import (
- "golang.org/x/net/context"
-)
-
-// CheckpointDelete deletes the checkpoint with the given name from the given container
-func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, checkpointID string) error {
- resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+checkpointID, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/checkpoint_list.go b/vendor/github.com/docker/engine-api/client/checkpoint_list.go
deleted file mode 100644
index ef5ec26..0000000
--- a/vendor/github.com/docker/engine-api/client/checkpoint_list.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package client
-
-import (
- "encoding/json"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// CheckpointList returns the checkpoints of the given container in the docker host.
-func (cli *Client) CheckpointList(ctx context.Context, container string) ([]types.Checkpoint, error) {
- var checkpoints []types.Checkpoint
-
- resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", nil, nil)
- if err != nil {
- return checkpoints, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&checkpoints)
- ensureReaderClosed(resp)
- return checkpoints, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/client.go b/vendor/github.com/docker/engine-api/client/client.go
deleted file mode 100644
index 02a70ba..0000000
--- a/vendor/github.com/docker/engine-api/client/client.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package client
-
-import (
- "fmt"
- "net/http"
- "net/url"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/docker/engine-api/client/transport"
- "github.com/docker/go-connections/tlsconfig"
-)
-
-// DefaultVersion is the version of the current stable API
-const DefaultVersion string = "1.23"
-
-// Client is the API client that performs all operations
-// against a docker server.
-type Client struct {
- // host holds the server address to connect to
- host string
- // proto holds the client protocol i.e. unix.
- proto string
- // addr holds the client address.
- addr string
- // basePath holds the path to prepend to the requests.
- basePath string
- // transport is the interface used to send requests; it implements transport.Client.
- transport transport.Client
- // version of the server to talk to.
- version string
- // custom http headers configured by users.
- customHTTPHeaders map[string]string
-}
-
-// NewEnvClient initializes a new API client based on environment variables.
-// Use DOCKER_HOST to set the url to the docker server.
-// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
-// Use DOCKER_CERT_PATH to load the tls certificates from.
-// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
-func NewEnvClient() (*Client, error) {
- var client *http.Client
- if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
- options := tlsconfig.Options{
- CAFile: filepath.Join(dockerCertPath, "ca.pem"),
- CertFile: filepath.Join(dockerCertPath, "cert.pem"),
- KeyFile: filepath.Join(dockerCertPath, "key.pem"),
- InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
- }
- tlsc, err := tlsconfig.Client(options)
- if err != nil {
- return nil, err
- }
-
- client = &http.Client{
- Transport: &http.Transport{
- TLSClientConfig: tlsc,
- },
- }
- }
-
- host := os.Getenv("DOCKER_HOST")
- if host == "" {
- host = DefaultDockerHost
- }
-
- version := os.Getenv("DOCKER_API_VERSION")
- if version == "" {
- version = DefaultVersion
- }
-
- return NewClient(host, version, client, nil)
-}
-
-// NewClient initializes a new API client for the given host and API version.
-// It uses the given http client as transport.
-// It also initializes the custom http headers to add to each request.
-//
-// It won't send any version information if the version number is empty. It is
-// highly recommended that you set a version or your client may break if the
-// server is upgraded.
-func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
- proto, addr, basePath, err := ParseHost(host)
- if err != nil {
- return nil, err
- }
-
- transport, err := transport.NewTransportWithHTTP(proto, addr, client)
- if err != nil {
- return nil, err
- }
-
- return &Client{
- host: host,
- proto: proto,
- addr: addr,
- basePath: basePath,
- transport: transport,
- version: version,
- customHTTPHeaders: httpHeaders,
- }, nil
-}
-
-// getAPIPath returns the versioned request path to call the api.
-// It appends the query parameters to the path if they are not empty.
-func (cli *Client) getAPIPath(p string, query url.Values) string {
- var apiPath string
- if cli.version != "" {
- v := strings.TrimPrefix(cli.version, "v")
- apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p)
- } else {
- apiPath = fmt.Sprintf("%s%s", cli.basePath, p)
- }
-
- u := &url.URL{
- Path: apiPath,
- }
- if len(query) > 0 {
- u.RawQuery = query.Encode()
- }
- return u.String()
-}
-
-// ClientVersion returns the version string associated with this
-// instance of the Client. Note that this value can be changed
-// via the DOCKER_API_VERSION env var.
-func (cli *Client) ClientVersion() string {
- return cli.version
-}
-
-// UpdateClientVersion updates the version string associated with this
-// instance of the Client.
-func (cli *Client) UpdateClientVersion(v string) {
- cli.version = v
-}
-
-// ParseHost verifies that the given host string is valid.
-func ParseHost(host string) (string, string, string, error) {
- protoAddrParts := strings.SplitN(host, "://", 2)
- if len(protoAddrParts) == 1 {
- return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
- }
-
- var basePath string
- proto, addr := protoAddrParts[0], protoAddrParts[1]
- if proto == "tcp" {
- parsed, err := url.Parse("tcp://" + addr)
- if err != nil {
- return "", "", "", err
- }
- addr = parsed.Host
- basePath = parsed.Path
- }
- return proto, addr, basePath, nil
-}
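The client constructor removed above is driven entirely by environment variables, so a caller needs very little code to get a working handle. A minimal sketch, assuming the engine-api import path shown above and a reachable daemon (DOCKER_HOST or the platform default socket):

package main

import (
	"fmt"
	"log"

	"github.com/docker/engine-api/client"
)

func main() {
	// NewEnvClient reads DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH and
	// DOCKER_TLS_VERIFY; anything unset falls back to the defaults defined above.
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("talking to the daemon with API version", cli.ClientVersion())
}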
diff --git a/vendor/github.com/docker/engine-api/client/client_unix.go b/vendor/github.com/docker/engine-api/client/client_unix.go
deleted file mode 100644
index 89de892..0000000
--- a/vendor/github.com/docker/engine-api/client/client_unix.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// +build linux freebsd solaris openbsd darwin
-
-package client
-
-// DefaultDockerHost defines the OS-specific default used when DOCKER_HOST is unset
-const DefaultDockerHost = "unix:///var/run/docker.sock"
diff --git a/vendor/github.com/docker/engine-api/client/client_windows.go b/vendor/github.com/docker/engine-api/client/client_windows.go
deleted file mode 100644
index 07c0c7a..0000000
--- a/vendor/github.com/docker/engine-api/client/client_windows.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package client
-
-// DefaultDockerHost defines the OS-specific default used when DOCKER_HOST is unset
-const DefaultDockerHost = "npipe:////./pipe/docker_engine"
diff --git a/vendor/github.com/docker/engine-api/client/container_attach.go b/vendor/github.com/docker/engine-api/client/container_attach.go
deleted file mode 100644
index 1b616bf..0000000
--- a/vendor/github.com/docker/engine-api/client/container_attach.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package client
-
-import (
- "net/url"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// ContainerAttach attaches a connection to a container in the server.
-// It returns a types.HijackedResponse with the hijacked connection
-// and a reader to get output. It's up to the caller to close
-// the hijacked connection by calling types.HijackedResponse.Close.
-func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) {
- query := url.Values{}
- if options.Stream {
- query.Set("stream", "1")
- }
- if options.Stdin {
- query.Set("stdin", "1")
- }
- if options.Stdout {
- query.Set("stdout", "1")
- }
- if options.Stderr {
- query.Set("stderr", "1")
- }
- if options.DetachKeys != "" {
- query.Set("detachKeys", options.DetachKeys)
- }
-
- headers := map[string][]string{"Content-Type": {"text/plain"}}
- return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers)
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_commit.go b/vendor/github.com/docker/engine-api/client/container_commit.go
deleted file mode 100644
index d5c4749..0000000
--- a/vendor/github.com/docker/engine-api/client/container_commit.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "errors"
- "net/url"
-
- distreference "github.com/docker/distribution/reference"
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/reference"
- "golang.org/x/net/context"
-)
-
-// ContainerCommit creates a new tagged image from a container's changes.
-func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) {
- var repository, tag string
- if options.Reference != "" {
- distributionRef, err := distreference.ParseNamed(options.Reference)
- if err != nil {
- return types.ContainerCommitResponse{}, err
- }
-
- if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical {
- return types.ContainerCommitResponse{}, errors.New("refusing to create a tag with a digest reference")
- }
-
- tag = reference.GetTagFromNamedRef(distributionRef)
- repository = distributionRef.Name()
- }
-
- query := url.Values{}
- query.Set("container", container)
- query.Set("repo", repository)
- query.Set("tag", tag)
- query.Set("comment", options.Comment)
- query.Set("author", options.Author)
- for _, change := range options.Changes {
- query.Add("changes", change)
- }
- if options.Pause != true {
- query.Set("pause", "0")
- }
-
- var response types.ContainerCommitResponse
- resp, err := cli.post(ctx, "/commit", query, options.Config, nil)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- ensureReaderClosed(resp)
- return response, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_copy.go b/vendor/github.com/docker/engine-api/client/container_copy.go
deleted file mode 100644
index d3dd0b1..0000000
--- a/vendor/github.com/docker/engine-api/client/container_copy.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package client
-
-import (
- "encoding/base64"
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "path/filepath"
- "strings"
-
- "golang.org/x/net/context"
-
- "github.com/docker/engine-api/types"
-)
-
-// ContainerStatPath returns Stat information about a path inside the container filesystem.
-func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) {
- query := url.Values{}
- query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
-
- urlStr := fmt.Sprintf("/containers/%s/archive", containerID)
- response, err := cli.head(ctx, urlStr, query, nil)
- if err != nil {
- return types.ContainerPathStat{}, err
- }
- defer ensureReaderClosed(response)
- return getContainerPathStatFromHeader(response.header)
-}
-
-// CopyToContainer copies content into the container filesystem.
-func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error {
- query := url.Values{}
- query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
- // Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
- if !options.AllowOverwriteDirWithFile {
- query.Set("noOverwriteDirNonDir", "true")
- }
-
- apiPath := fmt.Sprintf("/containers/%s/archive", container)
-
- response, err := cli.putRaw(ctx, apiPath, query, content, nil)
- if err != nil {
- return err
- }
- defer ensureReaderClosed(response)
-
- if response.statusCode != http.StatusOK {
- return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
- }
-
- return nil
-}
-
-// CopyFromContainer gets the content from the container and returns it as a Reader
-// to manipulate it in the host. It's up to the caller to close the reader.
-func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
- query := make(url.Values, 1)
- query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.
-
- apiPath := fmt.Sprintf("/containers/%s/archive", container)
- response, err := cli.get(ctx, apiPath, query, nil)
- if err != nil {
- return nil, types.ContainerPathStat{}, err
- }
-
- if response.statusCode != http.StatusOK {
- return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
- }
-
- // In order to get the copy behavior right, we need to know information
- // about both the source and the destination. The response headers include
- // stat info about the source that we can use in deciding exactly how to
- // copy it locally. Along with the stat info about the local destination,
- // we have everything we need to handle the multiple possibilities there
- // can be when copying a file/dir from one location to another file/dir.
- stat, err := getContainerPathStatFromHeader(response.header)
- if err != nil {
- return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err)
- }
- return response.body, stat, err
-}
-
-func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
- var stat types.ContainerPathStat
-
- encodedStat := header.Get("X-Docker-Container-Path-Stat")
- statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
-
- err := json.NewDecoder(statDecoder).Decode(&stat)
- if err != nil {
- err = fmt.Errorf("unable to decode container path stat header: %s", err)
- }
-
- return stat, err
-}
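As the comments above note, CopyFromContainer hands back the content as a tar stream plus a stat header. A small sketch of consuming both, assuming an already-initialized *client.Client and a hypothetical container ID; the path /etc/hostname is only an example:

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"log"

	"github.com/docker/engine-api/client"
	"golang.org/x/net/context"
)

// printArchive streams a path out of a container and lists the tar entries.
func printArchive(ctx context.Context, cli *client.Client, containerID string) {
	rc, stat, err := cli.CopyFromContainer(ctx, containerID, "/etc/hostname")
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	// The stat header describes the source path; the body is a tar archive.
	fmt.Printf("copying %s (%d bytes)\n", stat.Name, stat.Size)
	tr := tar.NewReader(rc)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("entry:", hdr.Name)
	}
}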
diff --git a/vendor/github.com/docker/engine-api/client/container_create.go b/vendor/github.com/docker/engine-api/client/container_create.go
deleted file mode 100644
index bbef1c9..0000000
--- a/vendor/github.com/docker/engine-api/client/container_create.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "net/url"
- "strings"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/container"
- "github.com/docker/engine-api/types/network"
- "golang.org/x/net/context"
-)
-
-type configWrapper struct {
- *container.Config
- HostConfig *container.HostConfig
- NetworkingConfig *network.NetworkingConfig
-}
-
-// ContainerCreate creates a new container based on the given configuration.
-// It can be associated with a name, but that's not mandatory.
-func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) {
- var response types.ContainerCreateResponse
- query := url.Values{}
- if containerName != "" {
- query.Set("name", containerName)
- }
-
- body := configWrapper{
- Config: config,
- HostConfig: hostConfig,
- NetworkingConfig: networkingConfig,
- }
-
- serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
- if err != nil {
- if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
- return response, imageNotFoundError{config.Image}
- }
- return response, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&response)
- ensureReaderClosed(serverResp)
- return response, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_diff.go b/vendor/github.com/docker/engine-api/client/container_diff.go
deleted file mode 100644
index f4bb3a4..0000000
--- a/vendor/github.com/docker/engine-api/client/container_diff.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "net/url"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// ContainerDiff shows differences in a container filesystem since it was started.
-func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) {
- var changes []types.ContainerChange
-
- serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
- if err != nil {
- return changes, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&changes)
- ensureReaderClosed(serverResp)
- return changes, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_exec.go b/vendor/github.com/docker/engine-api/client/container_exec.go
deleted file mode 100644
index ff7e1a9..0000000
--- a/vendor/github.com/docker/engine-api/client/container_exec.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package client
-
-import (
- "encoding/json"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// ContainerExecCreate creates a new exec configuration to run an exec process.
-func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) {
- var response types.ContainerExecCreateResponse
- resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
- if err != nil {
- return response, err
- }
- err = json.NewDecoder(resp.body).Decode(&response)
- ensureReaderClosed(resp)
- return response, err
-}
-
-// ContainerExecStart starts an exec process already created in the docker host.
-func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
- resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
- ensureReaderClosed(resp)
- return err
-}
-
-// ContainerExecAttach attaches a connection to an exec process in the server.
-// It returns a types.HijackedResponse with the hijacked connection
-// and a reader to get output. It's up to the caller to close
-// the hijacked connection by calling types.HijackedResponse.Close.
-func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) {
- headers := map[string][]string{"Content-Type": {"application/json"}}
- return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
-}
-
-// ContainerExecInspect returns information about a specific exec process on the docker host.
-func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
- var response types.ContainerExecInspect
- resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- ensureReaderClosed(resp)
- return response, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_export.go b/vendor/github.com/docker/engine-api/client/container_export.go
deleted file mode 100644
index 52194f3..0000000
--- a/vendor/github.com/docker/engine-api/client/container_export.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package client
-
-import (
- "io"
- "net/url"
-
- "golang.org/x/net/context"
-)
-
-// ContainerExport retrieves the raw contents of a container
-// and returns them as an io.ReadCloser. It's up to the caller
-// to close the stream.
-func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) {
- serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
- if err != nil {
- return nil, err
- }
-
- return serverResp.body, nil
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_inspect.go b/vendor/github.com/docker/engine-api/client/container_inspect.go
deleted file mode 100644
index 0fa096d..0000000
--- a/vendor/github.com/docker/engine-api/client/container_inspect.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package client
-
-import (
- "bytes"
- "encoding/json"
- "io/ioutil"
- "net/http"
- "net/url"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// ContainerInspect returns the container information.
-func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
- serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
- if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return types.ContainerJSON{}, containerNotFoundError{containerID}
- }
- return types.ContainerJSON{}, err
- }
-
- var response types.ContainerJSON
- err = json.NewDecoder(serverResp.body).Decode(&response)
- ensureReaderClosed(serverResp)
- return response, err
-}
-
-// ContainerInspectWithRaw returns the container information and its raw representation.
-func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) {
- query := url.Values{}
- if getSize {
- query.Set("size", "1")
- }
- serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
- if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return types.ContainerJSON{}, nil, containerNotFoundError{containerID}
- }
- return types.ContainerJSON{}, nil, err
- }
- defer ensureReaderClosed(serverResp)
-
- body, err := ioutil.ReadAll(serverResp.body)
- if err != nil {
- return types.ContainerJSON{}, nil, err
- }
-
- var response types.ContainerJSON
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&response)
- return response, body, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_kill.go b/vendor/github.com/docker/engine-api/client/container_kill.go
deleted file mode 100644
index 29f80c7..0000000
--- a/vendor/github.com/docker/engine-api/client/container_kill.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package client
-
-import (
- "net/url"
-
- "golang.org/x/net/context"
-)
-
-// ContainerKill terminates the container process but does not remove the container from the docker host.
-func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error {
- query := url.Values{}
- query.Set("signal", signal)
-
- resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_list.go b/vendor/github.com/docker/engine-api/client/container_list.go
deleted file mode 100644
index 87f7333..0000000
--- a/vendor/github.com/docker/engine-api/client/container_list.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "net/url"
- "strconv"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/filters"
- "golang.org/x/net/context"
-)
-
-// ContainerList returns the list of containers in the docker host.
-func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
- query := url.Values{}
-
- if options.All {
- query.Set("all", "1")
- }
-
- if options.Limit != -1 {
- query.Set("limit", strconv.Itoa(options.Limit))
- }
-
- if options.Since != "" {
- query.Set("since", options.Since)
- }
-
- if options.Before != "" {
- query.Set("before", options.Before)
- }
-
- if options.Size {
- query.Set("size", "1")
- }
-
- if options.Filter.Len() > 0 {
- filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filter)
-
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.get(ctx, "/containers/json", query, nil)
- if err != nil {
- return nil, err
- }
-
- var containers []types.Container
- err = json.NewDecoder(resp.body).Decode(&containers)
- ensureReaderClosed(resp)
- return containers, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_logs.go b/vendor/github.com/docker/engine-api/client/container_logs.go
deleted file mode 100644
index 08b9b91..0000000
--- a/vendor/github.com/docker/engine-api/client/container_logs.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package client
-
-import (
- "io"
- "net/url"
- "time"
-
- "golang.org/x/net/context"
-
- "github.com/docker/engine-api/types"
- timetypes "github.com/docker/engine-api/types/time"
-)
-
-// ContainerLogs returns the logs generated by a container in an io.ReadCloser.
-// It's up to the caller to close the stream.
-func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
- query := url.Values{}
- if options.ShowStdout {
- query.Set("stdout", "1")
- }
-
- if options.ShowStderr {
- query.Set("stderr", "1")
- }
-
- if options.Since != "" {
- ts, err := timetypes.GetTimestamp(options.Since, time.Now())
- if err != nil {
- return nil, err
- }
- query.Set("since", ts)
- }
-
- if options.Timestamps {
- query.Set("timestamps", "1")
- }
-
- if options.Details {
- query.Set("details", "1")
- }
-
- if options.Follow {
- query.Set("follow", "1")
- }
- query.Set("tail", options.Tail)
-
- resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_pause.go b/vendor/github.com/docker/engine-api/client/container_pause.go
deleted file mode 100644
index 412067a..0000000
--- a/vendor/github.com/docker/engine-api/client/container_pause.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client
-
-import "golang.org/x/net/context"
-
-// ContainerPause pauses the main process of a given container without terminating it.
-func (cli *Client) ContainerPause(ctx context.Context, containerID string) error {
- resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_remove.go b/vendor/github.com/docker/engine-api/client/container_remove.go
deleted file mode 100644
index cef4b81..0000000
--- a/vendor/github.com/docker/engine-api/client/container_remove.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package client
-
-import (
- "net/url"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// ContainerRemove kills and removes a container from the docker host.
-func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error {
- query := url.Values{}
- if options.RemoveVolumes {
- query.Set("v", "1")
- }
- if options.RemoveLinks {
- query.Set("link", "1")
- }
-
- if options.Force {
- query.Set("force", "1")
- }
-
- resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_rename.go b/vendor/github.com/docker/engine-api/client/container_rename.go
deleted file mode 100644
index 0e718da..0000000
--- a/vendor/github.com/docker/engine-api/client/container_rename.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package client
-
-import (
- "net/url"
-
- "golang.org/x/net/context"
-)
-
-// ContainerRename changes the name of a given container.
-func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error {
- query := url.Values{}
- query.Set("name", newContainerName)
- resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_resize.go b/vendor/github.com/docker/engine-api/client/container_resize.go
deleted file mode 100644
index b95d26b..0000000
--- a/vendor/github.com/docker/engine-api/client/container_resize.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package client
-
-import (
- "net/url"
- "strconv"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// ContainerResize changes the size of the tty for a container.
-func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error {
- return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width)
-}
-
-// ContainerExecResize changes the size of the tty for an exec process running inside a container.
-func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error {
- return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width)
-}
-
-func (cli *Client) resize(ctx context.Context, basePath string, height, width int) error {
- query := url.Values{}
- query.Set("h", strconv.Itoa(height))
- query.Set("w", strconv.Itoa(width))
-
- resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_restart.go b/vendor/github.com/docker/engine-api/client/container_restart.go
deleted file mode 100644
index 93c042d..0000000
--- a/vendor/github.com/docker/engine-api/client/container_restart.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package client
-
-import (
- "net/url"
- "time"
-
- timetypes "github.com/docker/engine-api/types/time"
- "golang.org/x/net/context"
-)
-
-// ContainerRestart stops and starts a container again.
-// It makes the daemon wait for the container to be up again for
-// a specific amount of time, given the timeout.
-func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error {
- query := url.Values{}
- if timeout != nil {
- query.Set("t", timetypes.DurationToSecondsString(*timeout))
- }
- resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_start.go b/vendor/github.com/docker/engine-api/client/container_start.go
deleted file mode 100644
index 1e22eec..0000000
--- a/vendor/github.com/docker/engine-api/client/container_start.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client
-
-import (
- "net/url"
-
- "golang.org/x/net/context"
-
- "github.com/docker/engine-api/types"
-)
-
-// ContainerStart sends a request to the docker daemon to start a container.
-func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error {
- query := url.Values{}
- if len(options.CheckpointID) != 0 {
- query.Set("checkpoint", options.CheckpointID)
- }
-
- resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_stats.go b/vendor/github.com/docker/engine-api/client/container_stats.go
deleted file mode 100644
index 2cc67c3..0000000
--- a/vendor/github.com/docker/engine-api/client/container_stats.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package client
-
-import (
- "io"
- "net/url"
-
- "golang.org/x/net/context"
-)
-
-// ContainerStats returns near realtime stats for a given container.
-// It's up to the caller to close the io.ReadCloser returned.
-func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) {
- query := url.Values{}
- query.Set("stream", "0")
- if stream {
- query.Set("stream", "1")
- }
-
- resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
- if err != nil {
- return nil, err
- }
- return resp.body, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_stop.go b/vendor/github.com/docker/engine-api/client/container_stop.go
deleted file mode 100644
index 1fc577f..0000000
--- a/vendor/github.com/docker/engine-api/client/container_stop.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client
-
-import (
- "net/url"
- "time"
-
- timetypes "github.com/docker/engine-api/types/time"
- "golang.org/x/net/context"
-)
-
-// ContainerStop requests a graceful stop of a container rather than killing its process outright.
-// The call blocks until the container stops or the timeout expires.
-func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error {
- query := url.Values{}
- if timeout != nil {
- query.Set("t", timetypes.DurationToSecondsString(*timeout))
- }
- resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_top.go b/vendor/github.com/docker/engine-api/client/container_top.go
deleted file mode 100644
index 5ad926a..0000000
--- a/vendor/github.com/docker/engine-api/client/container_top.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "net/url"
- "strings"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// ContainerTop shows process information from within a container.
-func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) {
- var response types.ContainerProcessList
- query := url.Values{}
- if len(arguments) > 0 {
- query.Set("ps_args", strings.Join(arguments, " "))
- }
-
- resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- ensureReaderClosed(resp)
- return response, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_unpause.go b/vendor/github.com/docker/engine-api/client/container_unpause.go
deleted file mode 100644
index 5c76211..0000000
--- a/vendor/github.com/docker/engine-api/client/container_unpause.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client
-
-import "golang.org/x/net/context"
-
-// ContainerUnpause resumes the process execution within a container
-func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error {
- resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_update.go b/vendor/github.com/docker/engine-api/client/container_update.go
deleted file mode 100644
index 92e1f27..0000000
--- a/vendor/github.com/docker/engine-api/client/container_update.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package client
-
-import (
- "encoding/json"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/container"
- "golang.org/x/net/context"
-)
-
-// ContainerUpdate updates resources of a container
-func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (types.ContainerUpdateResponse, error) {
- var response types.ContainerUpdateResponse
- serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&response)
-
- ensureReaderClosed(serverResp)
- return response, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/container_wait.go b/vendor/github.com/docker/engine-api/client/container_wait.go
deleted file mode 100644
index c26ff3f..0000000
--- a/vendor/github.com/docker/engine-api/client/container_wait.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package client
-
-import (
- "encoding/json"
-
- "golang.org/x/net/context"
-
- "github.com/docker/engine-api/types"
-)
-
-// ContainerWait blocks until a container exits.
-// It returns the container's exit status code as reported by the API.
-func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int, error) {
- resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil)
- if err != nil {
- return -1, err
- }
- defer ensureReaderClosed(resp)
-
- var res types.ContainerWaitResponse
- if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
- return -1, err
- }
-
- return res.StatusCode, nil
-}
diff --git a/vendor/github.com/docker/engine-api/client/errors.go b/vendor/github.com/docker/engine-api/client/errors.go
deleted file mode 100644
index 71e25a7..0000000
--- a/vendor/github.com/docker/engine-api/client/errors.go
+++ /dev/null
@@ -1,208 +0,0 @@
-package client
-
-import (
- "errors"
- "fmt"
-)
-
-// ErrConnectionFailed is an error raised when the connection between the client and the server failed.
-var ErrConnectionFailed = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?")
-
-// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed.
-func ErrorConnectionFailed(host string) error {
- return fmt.Errorf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", host)
-}
-
-type notFound interface {
- error
- NotFound() bool // Is the error a NotFound error
-}
-
-// IsErrNotFound returns true if the error indicates that an
-// object (image, container, network, volume, …) was not found in the docker host.
-func IsErrNotFound(err error) bool {
- te, ok := err.(notFound)
- return ok && te.NotFound()
-}
-
-// imageNotFoundError implements an error returned when an image is not in the docker host.
-type imageNotFoundError struct {
- imageID string
-}
-
-// NotFound indicates that this error is of type NotFound
-func (e imageNotFoundError) NotFound() bool {
- return true
-}
-
-// Error returns a string representation of an imageNotFoundError
-func (e imageNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such image: %s", e.imageID)
-}
-
-// IsErrImageNotFound returns true if the error is caused
-// when an image is not found in the docker host.
-func IsErrImageNotFound(err error) bool {
- return IsErrNotFound(err)
-}
-
-// containerNotFoundError implements an error returned when a container is not in the docker host.
-type containerNotFoundError struct {
- containerID string
-}
-
-// NotFound indicates that this error is of type NotFound
-func (e containerNotFoundError) NotFound() bool {
- return true
-}
-
-// Error returns a string representation of a containerNotFoundError
-func (e containerNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such container: %s", e.containerID)
-}
-
-// IsErrContainerNotFound returns true if the error is caused
-// when a container is not found in the docker host.
-func IsErrContainerNotFound(err error) bool {
- return IsErrNotFound(err)
-}
-
-// networkNotFoundError implements an error returned when a network is not in the docker host.
-type networkNotFoundError struct {
- networkID string
-}
-
-// NotFound indicates that this error is of type NotFound
-func (e networkNotFoundError) NotFound() bool {
- return true
-}
-
-// Error returns a string representation of a networkNotFoundError
-func (e networkNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such network: %s", e.networkID)
-}
-
-// IsErrNetworkNotFound returns true if the error is caused
-// when a network is not found in the docker host.
-func IsErrNetworkNotFound(err error) bool {
- return IsErrNotFound(err)
-}
-
-// volumeNotFoundError implements an error returned when a volume is not in the docker host.
-type volumeNotFoundError struct {
- volumeID string
-}
-
-// NotFound indicates that this error is of type NotFound
-func (e volumeNotFoundError) NotFound() bool {
- return true
-}
-
-// Error returns a string representation of a volumeNotFoundError
-func (e volumeNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such volume: %s", e.volumeID)
-}
-
-// IsErrVolumeNotFound returns true if the error is caused
-// when a volume is not found in the docker host.
-func IsErrVolumeNotFound(err error) bool {
- return IsErrNotFound(err)
-}
-
-// unauthorizedError represents an authorization error in a remote registry.
-type unauthorizedError struct {
- cause error
-}
-
-// Error returns a string representation of an unauthorizedError
-func (u unauthorizedError) Error() string {
- return u.cause.Error()
-}
-
-// IsErrUnauthorized returns true if the error is caused
-// when a remote registry authentication fails
-func IsErrUnauthorized(err error) bool {
- _, ok := err.(unauthorizedError)
- return ok
-}
-
-// nodeNotFoundError implements an error returned when a node is not found.
-type nodeNotFoundError struct {
- nodeID string
-}
-
-// Error returns a string representation of a nodeNotFoundError
-func (e nodeNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such node: %s", e.nodeID)
-}
-
-// NotFound indicates that this error is of type NotFound
-func (e nodeNotFoundError) NotFound() bool {
- return true
-}
-
-// IsErrNodeNotFound returns true if the error is caused
-// when a node is not found.
-func IsErrNodeNotFound(err error) bool {
- _, ok := err.(nodeNotFoundError)
- return ok
-}
-
-// serviceNotFoundError implements an error returned when a service is not found.
-type serviceNotFoundError struct {
- serviceID string
-}
-
-// Error returns a string representation of a serviceNotFoundError
-func (e serviceNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such service: %s", e.serviceID)
-}
-
-// NotFound indicates that this error is of type NotFound
-func (e serviceNotFoundError) NotFound() bool {
- return true
-}
-
-// IsErrServiceNotFound returns true if the error is caused
-// when a service is not found.
-func IsErrServiceNotFound(err error) bool {
- _, ok := err.(serviceNotFoundError)
- return ok
-}
-
-// taskNotFoundError implements an error returned when a task is not found.
-type taskNotFoundError struct {
- taskID string
-}
-
-// Error returns a string representation of a taskNotFoundError
-func (e taskNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such task: %s", e.taskID)
-}
-
-// NotFound indicates that this error is of type NotFound
-func (e taskNotFoundError) NotFound() bool {
- return true
-}
-
-// IsErrTaskNotFound returns true if the error is caused
-// when a task is not found.
-func IsErrTaskNotFound(err error) bool {
- _, ok := err.(taskNotFoundError)
- return ok
-}
-
-type pluginPermissionDenied struct {
- name string
-}
-
-func (e pluginPermissionDenied) Error() string {
- return "Permission denied while installing plugin " + e.name
-}
-
-// IsErrPluginPermissionDenied returns true if the error is caused
-// when a user denies a plugin's permissions
-func IsErrPluginPermissionDenied(err error) bool {
- _, ok := err.(pluginPermissionDenied)
- return ok
-}
diff --git a/vendor/github.com/docker/engine-api/client/events.go b/vendor/github.com/docker/engine-api/client/events.go
deleted file mode 100644
index f22a18e..0000000
--- a/vendor/github.com/docker/engine-api/client/events.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package client
-
-import (
- "io"
- "net/url"
- "time"
-
- "golang.org/x/net/context"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/filters"
- timetypes "github.com/docker/engine-api/types/time"
-)
-
-// Events returns a stream of events in the daemon in a ReadCloser.
-// It's up to the caller to close the stream.
-func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) {
- query := url.Values{}
- ref := time.Now()
-
- if options.Since != "" {
- ts, err := timetypes.GetTimestamp(options.Since, ref)
- if err != nil {
- return nil, err
- }
- query.Set("since", ts)
- }
- if options.Until != "" {
- ts, err := timetypes.GetTimestamp(options.Until, ref)
- if err != nil {
- return nil, err
- }
- query.Set("until", ts)
- }
- if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
- if err != nil {
- return nil, err
- }
- query.Set("filters", filterJSON)
- }
-
- serverResponse, err := cli.get(ctx, "/events", query, nil)
- if err != nil {
- return nil, err
- }
- return serverResponse.body, nil
-}
diff --git a/vendor/github.com/docker/engine-api/client/hijack.go b/vendor/github.com/docker/engine-api/client/hijack.go
deleted file mode 100644
index b7a117e..0000000
--- a/vendor/github.com/docker/engine-api/client/hijack.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package client
-
-import (
- "crypto/tls"
- "errors"
- "fmt"
- "net"
- "net/http/httputil"
- "net/url"
- "strings"
- "time"
-
- "github.com/docker/engine-api/client/transport"
- "github.com/docker/engine-api/types"
- "github.com/docker/go-connections/sockets"
- "golang.org/x/net/context"
-)
-
-// tlsClientCon holds tls information and a dialed connection.
-type tlsClientCon struct {
- *tls.Conn
- rawConn net.Conn
-}
-
-func (c *tlsClientCon) CloseWrite() error {
-	// Go's standard tls.Conn doesn't provide the CloseWrite() method, so we do it
- // on its underlying connection.
- if conn, ok := c.rawConn.(types.CloseWriter); ok {
- return conn.CloseWrite()
- }
- return nil
-}
-
-// postHijacked sends a POST request and hijacks the connection.
-func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
- bodyEncoded, err := encodeData(body)
- if err != nil {
- return types.HijackedResponse{}, err
- }
-
- req, err := cli.newRequest("POST", path, query, bodyEncoded, headers)
- if err != nil {
- return types.HijackedResponse{}, err
- }
- req.Host = cli.addr
-
- req.Header.Set("Connection", "Upgrade")
- req.Header.Set("Upgrade", "tcp")
-
- conn, err := dial(cli.proto, cli.addr, cli.transport.TLSConfig())
- if err != nil {
- if strings.Contains(err.Error(), "connection refused") {
- return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
- }
- return types.HijackedResponse{}, err
- }
-
- // When we set up a TCP connection for hijack, there could be long periods
- // of inactivity (a long running command with no output) that in certain
- // network setups may cause ECONNTIMEOUT, leaving the client in an unknown
- // state. Setting TCP KeepAlive on the socket connection will prohibit
- // ECONNTIMEOUT unless the socket connection truly is broken
- if tcpConn, ok := conn.(*net.TCPConn); ok {
- tcpConn.SetKeepAlive(true)
- tcpConn.SetKeepAlivePeriod(30 * time.Second)
- }
-
- clientconn := httputil.NewClientConn(conn, nil)
- defer clientconn.Close()
-
- // Server hijacks the connection, error 'connection closed' expected
- _, err = clientconn.Do(req)
-
- rwc, br := clientconn.Hijack()
-
- return types.HijackedResponse{Conn: rwc, Reader: br}, err
-}
-
-func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
- return tlsDialWithDialer(new(net.Dialer), network, addr, config)
-}
-
-// We need to copy Go's implementation of tls.Dial (crypto/tls/tls.go) in
-// order to return our custom tlsClientCon struct which holds both the tls.Conn
-// object _and_ its underlying raw connection. The rationale for this is that
-// we need to be able to close the write end of the connection when attaching,
-// which tls.Conn does not provide.
-func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
- // We want the Timeout and Deadline values from dialer to cover the
- // whole process: TCP connection and TLS handshake. This means that we
- // also need to start our own timers now.
- timeout := dialer.Timeout
-
- if !dialer.Deadline.IsZero() {
- deadlineTimeout := dialer.Deadline.Sub(time.Now())
- if timeout == 0 || deadlineTimeout < timeout {
- timeout = deadlineTimeout
- }
- }
-
- var errChannel chan error
-
- if timeout != 0 {
- errChannel = make(chan error, 2)
- time.AfterFunc(timeout, func() {
- errChannel <- errors.New("")
- })
- }
-
- proxyDialer, err := sockets.DialerFromEnvironment(dialer)
- if err != nil {
- return nil, err
- }
-
- rawConn, err := proxyDialer.Dial(network, addr)
- if err != nil {
- return nil, err
- }
- // When we set up a TCP connection for hijack, there could be long periods
- // of inactivity (a long running command with no output) that in certain
- // network setups may cause ECONNTIMEOUT, leaving the client in an unknown
- // state. Setting TCP KeepAlive on the socket connection will prohibit
- // ECONNTIMEOUT unless the socket connection truly is broken
- if tcpConn, ok := rawConn.(*net.TCPConn); ok {
- tcpConn.SetKeepAlive(true)
- tcpConn.SetKeepAlivePeriod(30 * time.Second)
- }
-
- colonPos := strings.LastIndex(addr, ":")
- if colonPos == -1 {
- colonPos = len(addr)
- }
- hostname := addr[:colonPos]
-
- // If no ServerName is set, infer the ServerName
- // from the hostname we're connecting to.
- if config.ServerName == "" {
- // Make a copy to avoid polluting argument or default.
- config = transport.TLSConfigClone(config)
- config.ServerName = hostname
- }
-
- conn := tls.Client(rawConn, config)
-
- if timeout == 0 {
- err = conn.Handshake()
- } else {
- go func() {
- errChannel <- conn.Handshake()
- }()
-
- err = <-errChannel
- }
-
- if err != nil {
- rawConn.Close()
- return nil, err
- }
-
-	// This is where Docker differs from the standard crypto/tls package: we
-	// return a wrapper which holds both the TLS and raw connections.
- return &tlsClientCon{conn, rawConn}, nil
-}
-
-func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
- if tlsConfig != nil && proto != "unix" && proto != "npipe" {
- // Notice this isn't Go standard's tls.Dial function
- return tlsDial(proto, addr, tlsConfig)
- }
- if proto == "npipe" {
- return sockets.DialPipe(addr, 32*time.Second)
- }
- return net.Dial(proto, addr)
-}
diff --git a/vendor/github.com/docker/engine-api/client/image_build.go b/vendor/github.com/docker/engine-api/client/image_build.go
deleted file mode 100644
index 089d44d..0000000
--- a/vendor/github.com/docker/engine-api/client/image_build.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package client
-
-import (
- "encoding/base64"
- "encoding/json"
- "io"
- "net/http"
- "net/url"
- "regexp"
- "strconv"
-
- "golang.org/x/net/context"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/container"
-)
-
-var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`)
-
-// ImageBuild sends a request to the daemon to build images.
-// The Body in the response implements an io.ReadCloser, and it's up to the caller to
-// close it.
-func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
- query, err := imageBuildOptionsToQuery(options)
- if err != nil {
- return types.ImageBuildResponse{}, err
- }
-
- headers := http.Header(make(map[string][]string))
- buf, err := json.Marshal(options.AuthConfigs)
- if err != nil {
- return types.ImageBuildResponse{}, err
- }
- headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
- headers.Set("Content-Type", "application/tar")
-
- serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers)
- if err != nil {
- return types.ImageBuildResponse{}, err
- }
-
- osType := getDockerOS(serverResp.header.Get("Server"))
-
- return types.ImageBuildResponse{
- Body: serverResp.body,
- OSType: osType,
- }, nil
-}
-
-func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) {
- query := url.Values{
- "t": options.Tags,
- }
- if options.SuppressOutput {
- query.Set("q", "1")
- }
- if options.RemoteContext != "" {
- query.Set("remote", options.RemoteContext)
- }
- if options.NoCache {
- query.Set("nocache", "1")
- }
- if options.Remove {
- query.Set("rm", "1")
- } else {
- query.Set("rm", "0")
- }
-
- if options.ForceRemove {
- query.Set("forcerm", "1")
- }
-
- if options.PullParent {
- query.Set("pull", "1")
- }
-
- if options.Squash {
- query.Set("squash", "1")
- }
-
- if !container.Isolation.IsDefault(options.Isolation) {
- query.Set("isolation", string(options.Isolation))
- }
-
- query.Set("cpusetcpus", options.CPUSetCPUs)
- query.Set("cpusetmems", options.CPUSetMems)
- query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10))
- query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10))
- query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10))
- query.Set("memory", strconv.FormatInt(options.Memory, 10))
- query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10))
- query.Set("cgroupparent", options.CgroupParent)
- query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10))
- query.Set("dockerfile", options.Dockerfile)
-
- ulimitsJSON, err := json.Marshal(options.Ulimits)
- if err != nil {
- return query, err
- }
- query.Set("ulimits", string(ulimitsJSON))
-
- buildArgsJSON, err := json.Marshal(options.BuildArgs)
- if err != nil {
- return query, err
- }
- query.Set("buildargs", string(buildArgsJSON))
-
- labelsJSON, err := json.Marshal(options.Labels)
- if err != nil {
- return query, err
- }
- query.Set("labels", string(labelsJSON))
- return query, nil
-}
-
-func getDockerOS(serverHeader string) string {
- var osType string
- matches := headerRegexp.FindStringSubmatch(serverHeader)
- if len(matches) > 0 {
- osType = matches[1]
- }
- return osType
-}
diff --git a/vendor/github.com/docker/engine-api/client/image_create.go b/vendor/github.com/docker/engine-api/client/image_create.go
deleted file mode 100644
index 1528b0b..0000000
--- a/vendor/github.com/docker/engine-api/client/image_create.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package client
-
-import (
- "io"
- "net/url"
-
- "golang.org/x/net/context"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/reference"
-)
-
-// ImageCreate creates a new image based on the parent options.
-// It returns the JSON content in the response body.
-func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) {
- repository, tag, err := reference.Parse(parentReference)
- if err != nil {
- return nil, err
- }
-
- query := url.Values{}
- query.Set("fromImage", repository)
- query.Set("tag", tag)
- resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
-
-func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- return cli.post(ctx, "/images/create", query, nil, headers)
-}
diff --git a/vendor/github.com/docker/engine-api/client/image_history.go b/vendor/github.com/docker/engine-api/client/image_history.go
deleted file mode 100644
index b2840b5..0000000
--- a/vendor/github.com/docker/engine-api/client/image_history.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "net/url"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// ImageHistory returns the changes in an image in history format.
-func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) {
- var history []types.ImageHistory
- serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil)
- if err != nil {
- return history, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&history)
- ensureReaderClosed(serverResp)
- return history, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/image_import.go b/vendor/github.com/docker/engine-api/client/image_import.go
deleted file mode 100644
index 4e8749a..0000000
--- a/vendor/github.com/docker/engine-api/client/image_import.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package client
-
-import (
- "io"
- "net/url"
-
- "golang.org/x/net/context"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/engine-api/types"
-)
-
-// ImageImport creates a new image based on the source options.
-// It returns the JSON content in the response body.
-func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) {
- if ref != "" {
- //Check if the given image name can be resolved
- if _, err := reference.ParseNamed(ref); err != nil {
- return nil, err
- }
- }
-
- query := url.Values{}
- query.Set("fromSrc", source.SourceName)
- query.Set("repo", ref)
- query.Set("tag", options.Tag)
- query.Set("message", options.Message)
- for _, change := range options.Changes {
- query.Add("changes", change)
- }
-
- resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
diff --git a/vendor/github.com/docker/engine-api/client/image_inspect.go b/vendor/github.com/docker/engine-api/client/image_inspect.go
deleted file mode 100644
index d5f6bc5..0000000
--- a/vendor/github.com/docker/engine-api/client/image_inspect.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package client
-
-import (
- "bytes"
- "encoding/json"
- "io/ioutil"
- "net/http"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// ImageInspectWithRaw returns the image information and its raw representation.
-func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) {
- serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil)
- if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return types.ImageInspect{}, nil, imageNotFoundError{imageID}
- }
- return types.ImageInspect{}, nil, err
- }
- defer ensureReaderClosed(serverResp)
-
- body, err := ioutil.ReadAll(serverResp.body)
- if err != nil {
- return types.ImageInspect{}, nil, err
- }
-
- var response types.ImageInspect
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&response)
- return response, body, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/image_list.go b/vendor/github.com/docker/engine-api/client/image_list.go
deleted file mode 100644
index 7408258..0000000
--- a/vendor/github.com/docker/engine-api/client/image_list.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "net/url"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/filters"
- "golang.org/x/net/context"
-)
-
-// ImageList returns a list of images in the docker host.
-func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) {
- var images []types.Image
- query := url.Values{}
-
- if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
- if err != nil {
- return images, err
- }
- query.Set("filters", filterJSON)
- }
- if options.MatchName != "" {
- // FIXME rename this parameter, to not be confused with the filters flag
- query.Set("filter", options.MatchName)
- }
- if options.All {
- query.Set("all", "1")
- }
-
- serverResp, err := cli.get(ctx, "/images/json", query, nil)
- if err != nil {
- return images, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&images)
- ensureReaderClosed(serverResp)
- return images, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/image_load.go b/vendor/github.com/docker/engine-api/client/image_load.go
deleted file mode 100644
index 72f55fd..0000000
--- a/vendor/github.com/docker/engine-api/client/image_load.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package client
-
-import (
- "io"
- "net/url"
-
- "golang.org/x/net/context"
-
- "github.com/docker/engine-api/types"
-)
-
-// ImageLoad loads an image in the docker host from the client host.
-// It's up to the caller to close the io.ReadCloser in the
-// ImageLoadResponse returned by this function.
-func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) {
- v := url.Values{}
- v.Set("quiet", "0")
- if quiet {
- v.Set("quiet", "1")
- }
- headers := map[string][]string{"Content-Type": {"application/x-tar"}}
- resp, err := cli.postRaw(ctx, "/images/load", v, input, headers)
- if err != nil {
- return types.ImageLoadResponse{}, err
- }
- return types.ImageLoadResponse{
- Body: resp.body,
- JSON: resp.header.Get("Content-Type") == "application/json",
- }, nil
-}
diff --git a/vendor/github.com/docker/engine-api/client/image_pull.go b/vendor/github.com/docker/engine-api/client/image_pull.go
deleted file mode 100644
index e2c49ec..0000000
--- a/vendor/github.com/docker/engine-api/client/image_pull.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package client
-
-import (
- "io"
- "net/http"
- "net/url"
-
- "golang.org/x/net/context"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/reference"
-)
-
-// ImagePull requests the docker host to pull an image from a remote registry.
-// It executes the privileged function if the operation is unauthorized
-// and it tries one more time.
-// It's up to the caller to handle the io.ReadCloser and close it properly.
-//
-// FIXME(vdemeester): this is currently used in a few ways in docker/docker
-// - if not in trusted content, ref is used to pass the whole reference, and tag is empty
-// - if in trusted content, ref is used to pass the reference name, and tag for the digest
-func (cli *Client) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) {
- repository, tag, err := reference.Parse(ref)
- if err != nil {
- return nil, err
- }
-
- query := url.Values{}
- query.Set("fromImage", repository)
- if tag != "" && !options.All {
- query.Set("tag", tag)
- }
-
- resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
- if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
- newAuthHeader, privilegeErr := options.PrivilegeFunc()
- if privilegeErr != nil {
- return nil, privilegeErr
- }
- resp, err = cli.tryImageCreate(ctx, query, newAuthHeader)
- }
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
diff --git a/vendor/github.com/docker/engine-api/client/image_push.go b/vendor/github.com/docker/engine-api/client/image_push.go
deleted file mode 100644
index a30fc3b..0000000
--- a/vendor/github.com/docker/engine-api/client/image_push.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package client
-
-import (
- "errors"
- "io"
- "net/http"
- "net/url"
-
- "golang.org/x/net/context"
-
- distreference "github.com/docker/distribution/reference"
- "github.com/docker/engine-api/types"
-)
-
-// ImagePush requests the docker host to push an image to a remote registry.
-// It executes the privileged function if the operation is unauthorized
-// and it tries one more time.
-// It's up to the caller to handle the io.ReadCloser and close it properly.
-func (cli *Client) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) {
- distributionRef, err := distreference.ParseNamed(ref)
- if err != nil {
- return nil, err
- }
-
- if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical {
- return nil, errors.New("cannot push a digest reference")
- }
-
- var tag = ""
- if nameTaggedRef, isNamedTagged := distributionRef.(distreference.NamedTagged); isNamedTagged {
- tag = nameTaggedRef.Tag()
- }
-
- query := url.Values{}
- query.Set("tag", tag)
-
- resp, err := cli.tryImagePush(ctx, distributionRef.Name(), query, options.RegistryAuth)
- if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
- newAuthHeader, privilegeErr := options.PrivilegeFunc()
- if privilegeErr != nil {
- return nil, privilegeErr
- }
- resp, err = cli.tryImagePush(ctx, distributionRef.Name(), query, newAuthHeader)
- }
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
-
-func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers)
-}
diff --git a/vendor/github.com/docker/engine-api/client/image_remove.go b/vendor/github.com/docker/engine-api/client/image_remove.go
deleted file mode 100644
index 4722432..0000000
--- a/vendor/github.com/docker/engine-api/client/image_remove.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "net/url"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// ImageRemove removes an image from the docker host.
-func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) {
- query := url.Values{}
-
- if options.Force {
- query.Set("force", "1")
- }
- if !options.PruneChildren {
- query.Set("noprune", "1")
- }
-
- resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
- if err != nil {
- return nil, err
- }
-
- var dels []types.ImageDelete
- err = json.NewDecoder(resp.body).Decode(&dels)
- ensureReaderClosed(resp)
- return dels, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/image_save.go b/vendor/github.com/docker/engine-api/client/image_save.go
deleted file mode 100644
index ecac880..0000000
--- a/vendor/github.com/docker/engine-api/client/image_save.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package client
-
-import (
- "io"
- "net/url"
-
- "golang.org/x/net/context"
-)
-
-// ImageSave retrieves one or more images from the docker host as an io.ReadCloser.
-// It's up to the caller to store the images and close the stream.
-func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) {
- query := url.Values{
- "names": imageIDs,
- }
-
- resp, err := cli.get(ctx, "/images/get", query, nil)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
diff --git a/vendor/github.com/docker/engine-api/client/image_search.go b/vendor/github.com/docker/engine-api/client/image_search.go
deleted file mode 100644
index b7a7de3..0000000
--- a/vendor/github.com/docker/engine-api/client/image_search.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "net/url"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/filters"
- "github.com/docker/engine-api/types/registry"
- "golang.org/x/net/context"
-)
-
-// ImageSearch asks the docker host to search for a term in a remote registry.
-// The list of results is not sorted in any fashion.
-func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) {
- var results []registry.SearchResult
- query := url.Values{}
- query.Set("term", term)
- query.Set("limit", fmt.Sprintf("%d", options.Limit))
-
- if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToParam(options.Filters)
- if err != nil {
- return results, err
- }
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth)
- if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
- newAuthHeader, privilegeErr := options.PrivilegeFunc()
- if privilegeErr != nil {
- return results, privilegeErr
- }
- resp, err = cli.tryImageSearch(ctx, query, newAuthHeader)
- }
- if err != nil {
- return results, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&results)
- ensureReaderClosed(resp)
- return results, err
-}
-
-func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- return cli.get(ctx, "/images/search", query, headers)
-}
diff --git a/vendor/github.com/docker/engine-api/client/image_tag.go b/vendor/github.com/docker/engine-api/client/image_tag.go
deleted file mode 100644
index 7182913..0000000
--- a/vendor/github.com/docker/engine-api/client/image_tag.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package client
-
-import (
- "errors"
- "fmt"
- "net/url"
-
- "golang.org/x/net/context"
-
- distreference "github.com/docker/distribution/reference"
- "github.com/docker/engine-api/types/reference"
-)
-
-// ImageTag tags an image in the docker host
-func (cli *Client) ImageTag(ctx context.Context, imageID, ref string) error {
- distributionRef, err := distreference.ParseNamed(ref)
- if err != nil {
- return fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", ref)
- }
-
- if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical {
- return errors.New("refusing to create a tag with a digest reference")
- }
-
- tag := reference.GetTagFromNamedRef(distributionRef)
-
- query := url.Values{}
- query.Set("repo", distributionRef.Name())
- query.Set("tag", tag)
-
- resp, err := cli.post(ctx, "/images/"+imageID+"/tag", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/info.go b/vendor/github.com/docker/engine-api/client/info.go
deleted file mode 100644
index ff0958d..0000000
--- a/vendor/github.com/docker/engine-api/client/info.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "fmt"
- "net/url"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// Info returns information about the docker server.
-func (cli *Client) Info(ctx context.Context) (types.Info, error) {
- var info types.Info
- serverResp, err := cli.get(ctx, "/info", url.Values{}, nil)
- if err != nil {
- return info, err
- }
- defer ensureReaderClosed(serverResp)
-
- if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil {
- return info, fmt.Errorf("Error reading remote info: %v", err)
- }
-
- return info, nil
-}
diff --git a/vendor/github.com/docker/engine-api/client/interface.go b/vendor/github.com/docker/engine-api/client/interface.go
deleted file mode 100644
index 924cf5d..0000000
--- a/vendor/github.com/docker/engine-api/client/interface.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package client
-
-import (
- "io"
- "time"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/container"
- "github.com/docker/engine-api/types/filters"
- "github.com/docker/engine-api/types/network"
- "github.com/docker/engine-api/types/registry"
- "github.com/docker/engine-api/types/swarm"
- "golang.org/x/net/context"
-)
-
-// CommonAPIClient defines the methods common to the stable and experimental versions of APIClient.
-type CommonAPIClient interface {
- ContainerAPIClient
- ImageAPIClient
- NodeAPIClient
- NetworkAPIClient
- ServiceAPIClient
- SwarmAPIClient
- SystemAPIClient
- VolumeAPIClient
- ClientVersion() string
- ServerVersion(ctx context.Context) (types.Version, error)
- UpdateClientVersion(v string)
-}
-
-// ContainerAPIClient defines API client methods for the containers
-type ContainerAPIClient interface {
- ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error)
- ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error)
- ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error)
- ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error)
- ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error)
- ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error)
- ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error)
- ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error
- ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error
- ContainerExport(ctx context.Context, container string) (io.ReadCloser, error)
- ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
- ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error)
- ContainerKill(ctx context.Context, container, signal string) error
- ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
- ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error)
- ContainerPause(ctx context.Context, container string) error
- ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error
- ContainerRename(ctx context.Context, container, newContainerName string) error
- ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error
- ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error
- ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error)
- ContainerStats(ctx context.Context, container string, stream bool) (io.ReadCloser, error)
- ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error
- ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
- ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error)
- ContainerUnpause(ctx context.Context, container string) error
- ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (types.ContainerUpdateResponse, error)
- ContainerWait(ctx context.Context, container string) (int, error)
- CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
- CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error
-}
-
-// ImageAPIClient defines API client methods for the images
-type ImageAPIClient interface {
- ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
- ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
- ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error)
- ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)
- ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error)
- ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error)
- ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error)
- ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error)
- ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error)
- ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDelete, error)
- ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error)
- ImageSave(ctx context.Context, images []string) (io.ReadCloser, error)
- ImageTag(ctx context.Context, image, ref string) error
-}
-
-// NetworkAPIClient defines API client methods for the networks
-type NetworkAPIClient interface {
- NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error
- NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error)
- NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error
- NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error)
- NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error)
- NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error)
- NetworkRemove(ctx context.Context, networkID string) error
-}
-
-// NodeAPIClient defines API client methods for the nodes
-type NodeAPIClient interface {
- NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error)
- NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
- NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error
- NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error
-}
-
-// ServiceAPIClient defines API client methods for the services
-type ServiceAPIClient interface {
- ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error)
- ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error)
- ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
- ServiceRemove(ctx context.Context, serviceID string) error
- ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) error
- TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error)
- TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
-}
-
-// SwarmAPIClient defines API client methods for the swarm
-type SwarmAPIClient interface {
- SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
- SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
- SwarmLeave(ctx context.Context, force bool) error
- SwarmInspect(ctx context.Context) (swarm.Swarm, error)
- SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error
-}
-
-// SystemAPIClient defines API client methods for the system
-type SystemAPIClient interface {
- Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error)
- Info(ctx context.Context) (types.Info, error)
- RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error)
-}
-
-// VolumeAPIClient defines API client methods for the volumes
-type VolumeAPIClient interface {
- VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error)
- VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error)
- VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error)
- VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error)
- VolumeRemove(ctx context.Context, volumeID string, force bool) error
-}
diff --git a/vendor/github.com/docker/engine-api/client/interface_experimental.go b/vendor/github.com/docker/engine-api/client/interface_experimental.go
deleted file mode 100644
index 1835995..0000000
--- a/vendor/github.com/docker/engine-api/client/interface_experimental.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build experimental
-
-package client
-
-import (
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// APIClient is an interface that clients that talk with a docker server must implement.
-type APIClient interface {
- CommonAPIClient
- CheckpointAPIClient
- PluginAPIClient
-}
-
-// CheckpointAPIClient defines API client methods for the checkpoints
-type CheckpointAPIClient interface {
- CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error
- CheckpointDelete(ctx context.Context, container string, checkpointID string) error
- CheckpointList(ctx context.Context, container string) ([]types.Checkpoint, error)
-}
-
-// PluginAPIClient defines API client methods for the plugins
-type PluginAPIClient interface {
- PluginList(ctx context.Context) (types.PluginsListResponse, error)
- PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error
- PluginEnable(ctx context.Context, name string) error
- PluginDisable(ctx context.Context, name string) error
- PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) error
- PluginPush(ctx context.Context, name string, registryAuth string) error
- PluginSet(ctx context.Context, name string, args []string) error
- PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error)
-}
-
-// Ensure that Client always implements APIClient.
-var _ APIClient = &Client{}
diff --git a/vendor/github.com/docker/engine-api/client/interface_stable.go b/vendor/github.com/docker/engine-api/client/interface_stable.go
deleted file mode 100644
index 496f522..0000000
--- a/vendor/github.com/docker/engine-api/client/interface_stable.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build !experimental
-
-package client
-
-// APIClient is an interface that clients that talk with a docker server must implement.
-type APIClient interface {
- CommonAPIClient
-}
-
-// Ensure that Client always implements APIClient.
-var _ APIClient = &Client{}
diff --git a/vendor/github.com/docker/engine-api/client/login.go b/vendor/github.com/docker/engine-api/client/login.go
deleted file mode 100644
index b14f239..0000000
--- a/vendor/github.com/docker/engine-api/client/login.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "net/http"
- "net/url"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// RegistryLogin authenticates the docker server with a given docker registry.
-// It returns an unauthorizedError when the authentication fails.
-func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) {
- resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil)
-
- if resp.statusCode == http.StatusUnauthorized {
- return types.AuthResponse{}, unauthorizedError{err}
- }
- if err != nil {
- return types.AuthResponse{}, err
- }
-
- var response types.AuthResponse
- err = json.NewDecoder(resp.body).Decode(&response)
- ensureReaderClosed(resp)
- return response, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/network_connect.go b/vendor/github.com/docker/engine-api/client/network_connect.go
deleted file mode 100644
index 9a402a3..0000000
--- a/vendor/github.com/docker/engine-api/client/network_connect.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package client
-
-import (
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/network"
- "golang.org/x/net/context"
-)
-
-// NetworkConnect connects a container to an existing network in the docker host.
-func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error {
- nc := types.NetworkConnect{
- Container: containerID,
- EndpointConfig: config,
- }
- resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/network_create.go b/vendor/github.com/docker/engine-api/client/network_create.go
deleted file mode 100644
index c9c0b9f..0000000
--- a/vendor/github.com/docker/engine-api/client/network_create.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package client
-
-import (
- "encoding/json"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// NetworkCreate creates a new network in the docker host.
-func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) {
- networkCreateRequest := types.NetworkCreateRequest{
- NetworkCreate: options,
- Name: name,
- }
- var response types.NetworkCreateResponse
- serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil)
- if err != nil {
- return response, err
- }
-
- json.NewDecoder(serverResp.body).Decode(&response)
- ensureReaderClosed(serverResp)
- return response, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/network_disconnect.go b/vendor/github.com/docker/engine-api/client/network_disconnect.go
deleted file mode 100644
index a3e3367..0000000
--- a/vendor/github.com/docker/engine-api/client/network_disconnect.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package client
-
-import (
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// NetworkDisconnect disconnects a container from an existing network in the docker host.
-func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error {
- nd := types.NetworkDisconnect{Container: containerID, Force: force}
- resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/network_inspect.go b/vendor/github.com/docker/engine-api/client/network_inspect.go
deleted file mode 100644
index e22fcd6..0000000
--- a/vendor/github.com/docker/engine-api/client/network_inspect.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package client
-
-import (
- "bytes"
- "encoding/json"
- "io/ioutil"
- "net/http"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// NetworkInspect returns the information for a specific network configured in the docker host.
-func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) {
- networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID)
- return networkResource, err
-}
-
-// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation.
-func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) {
- var networkResource types.NetworkResource
- resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil)
- if err != nil {
- if resp.statusCode == http.StatusNotFound {
- return networkResource, nil, networkNotFoundError{networkID}
- }
- return networkResource, nil, err
- }
- defer ensureReaderClosed(resp)
-
- body, err := ioutil.ReadAll(resp.body)
- if err != nil {
- return networkResource, nil, err
- }
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&networkResource)
- return networkResource, body, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/network_list.go b/vendor/github.com/docker/engine-api/client/network_list.go
deleted file mode 100644
index 0569552..0000000
--- a/vendor/github.com/docker/engine-api/client/network_list.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "net/url"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/filters"
- "golang.org/x/net/context"
-)
-
-// NetworkList returns the list of networks configured in the docker host.
-func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) {
- query := url.Values{}
- if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
- var networkResources []types.NetworkResource
- resp, err := cli.get(ctx, "/networks", query, nil)
- if err != nil {
- return networkResources, err
- }
- err = json.NewDecoder(resp.body).Decode(&networkResources)
- ensureReaderClosed(resp)
- return networkResources, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/network_remove.go b/vendor/github.com/docker/engine-api/client/network_remove.go
deleted file mode 100644
index 6bd6748..0000000
--- a/vendor/github.com/docker/engine-api/client/network_remove.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client
-
-import "golang.org/x/net/context"
-
-// NetworkRemove removes an existing network from the docker host.
-func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
- resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/node_inspect.go b/vendor/github.com/docker/engine-api/client/node_inspect.go
deleted file mode 100644
index 5f555bb..0000000
--- a/vendor/github.com/docker/engine-api/client/node_inspect.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package client
-
-import (
- "bytes"
- "encoding/json"
- "io/ioutil"
- "net/http"
-
- "github.com/docker/engine-api/types/swarm"
- "golang.org/x/net/context"
-)
-
-// NodeInspectWithRaw returns the node information.
-func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
- serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
- if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return swarm.Node{}, nil, nodeNotFoundError{nodeID}
- }
- return swarm.Node{}, nil, err
- }
- defer ensureReaderClosed(serverResp)
-
- body, err := ioutil.ReadAll(serverResp.body)
- if err != nil {
- return swarm.Node{}, nil, err
- }
-
- var response swarm.Node
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&response)
- return response, body, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/node_list.go b/vendor/github.com/docker/engine-api/client/node_list.go
deleted file mode 100644
index 57cf148..0000000
--- a/vendor/github.com/docker/engine-api/client/node_list.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "net/url"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/filters"
- "github.com/docker/engine-api/types/swarm"
- "golang.org/x/net/context"
-)
-
-// NodeList returns the list of nodes.
-func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) {
- query := url.Values{}
-
- if options.Filter.Len() > 0 {
- filterJSON, err := filters.ToParam(options.Filter)
-
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.get(ctx, "/nodes", query, nil)
- if err != nil {
- return nil, err
- }
-
- var nodes []swarm.Node
- err = json.NewDecoder(resp.body).Decode(&nodes)
- ensureReaderClosed(resp)
- return nodes, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/node_remove.go b/vendor/github.com/docker/engine-api/client/node_remove.go
deleted file mode 100644
index a9cf8ba..0000000
--- a/vendor/github.com/docker/engine-api/client/node_remove.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client
-
-import (
- "net/url"
-
- "github.com/docker/engine-api/types"
-
- "golang.org/x/net/context"
-)
-
-// NodeRemove removes a Node.
-func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error {
- query := url.Values{}
- if options.Force {
- query.Set("force", "1")
- }
-
- resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/node_update.go b/vendor/github.com/docker/engine-api/client/node_update.go
deleted file mode 100644
index 4722211..0000000
--- a/vendor/github.com/docker/engine-api/client/node_update.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package client
-
-import (
- "net/url"
- "strconv"
-
- "github.com/docker/engine-api/types/swarm"
- "golang.org/x/net/context"
-)
-
-// NodeUpdate updates a Node.
-func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error {
- query := url.Values{}
- query.Set("version", strconv.FormatUint(version.Index, 10))
- resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/plugin_disable.go b/vendor/github.com/docker/engine-api/client/plugin_disable.go
deleted file mode 100644
index 893fc6e..0000000
--- a/vendor/github.com/docker/engine-api/client/plugin_disable.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build experimental
-
-package client
-
-import (
- "golang.org/x/net/context"
-)
-
-// PluginDisable disables a plugin
-func (cli *Client) PluginDisable(ctx context.Context, name string) error {
- resp, err := cli.post(ctx, "/plugins/"+name+"/disable", nil, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/plugin_enable.go b/vendor/github.com/docker/engine-api/client/plugin_enable.go
deleted file mode 100644
index 84422ab..0000000
--- a/vendor/github.com/docker/engine-api/client/plugin_enable.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build experimental
-
-package client
-
-import (
- "golang.org/x/net/context"
-)
-
-// PluginEnable enables a plugin
-func (cli *Client) PluginEnable(ctx context.Context, name string) error {
- resp, err := cli.post(ctx, "/plugins/"+name+"/enable", nil, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/plugin_inspect.go b/vendor/github.com/docker/engine-api/client/plugin_inspect.go
deleted file mode 100644
index 1e58af3..0000000
--- a/vendor/github.com/docker/engine-api/client/plugin_inspect.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build experimental
-
-package client
-
-import (
- "bytes"
- "encoding/json"
- "io/ioutil"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// PluginInspectWithRaw inspects an existing plugin
-func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) {
- resp, err := cli.get(ctx, "/plugins/"+name, nil, nil)
- if err != nil {
- return nil, nil, err
- }
-
- defer ensureReaderClosed(resp)
- body, err := ioutil.ReadAll(resp.body)
- if err != nil {
- return nil, nil, err
- }
- var p types.Plugin
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&p)
- return &p, body, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/plugin_install.go b/vendor/github.com/docker/engine-api/client/plugin_install.go
deleted file mode 100644
index 6cf50ff..0000000
--- a/vendor/github.com/docker/engine-api/client/plugin_install.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// +build experimental
-
-package client
-
-import (
- "encoding/json"
- "net/http"
- "net/url"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// PluginInstall installs a plugin
-func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) error {
- // FIXME(vdemeester) name is a ref, we might want to parse/validate it here.
- query := url.Values{}
- query.Set("name", name)
- resp, err := cli.tryPluginPull(ctx, query, options.RegistryAuth)
- if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
- newAuthHeader, privilegeErr := options.PrivilegeFunc()
- if privilegeErr != nil {
- ensureReaderClosed(resp)
- return privilegeErr
- }
- resp, err = cli.tryPluginPull(ctx, query, newAuthHeader)
- }
- if err != nil {
- ensureReaderClosed(resp)
- return err
- }
- var privileges types.PluginPrivileges
- if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil {
- ensureReaderClosed(resp)
- return err
- }
- ensureReaderClosed(resp)
-
- if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 {
- accept, err := options.AcceptPermissionsFunc(privileges)
- if err != nil {
- return err
- }
- if !accept {
- resp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
- ensureReaderClosed(resp)
- return pluginPermissionDenied{name}
- }
- }
- if options.Disabled {
- return nil
- }
- return cli.PluginEnable(ctx, name)
-}
-
-func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- return cli.post(ctx, "/plugins/pull", query, nil, headers)
-}
diff --git a/vendor/github.com/docker/engine-api/client/plugin_list.go b/vendor/github.com/docker/engine-api/client/plugin_list.go
deleted file mode 100644
index 7f2e2f2..0000000
--- a/vendor/github.com/docker/engine-api/client/plugin_list.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// +build experimental
-
-package client
-
-import (
- "encoding/json"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// PluginList returns the installed plugins
-func (cli *Client) PluginList(ctx context.Context) (types.PluginsListResponse, error) {
- var plugins types.PluginsListResponse
- resp, err := cli.get(ctx, "/plugins", nil, nil)
- if err != nil {
- return plugins, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&plugins)
- ensureReaderClosed(resp)
- return plugins, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/plugin_push.go b/vendor/github.com/docker/engine-api/client/plugin_push.go
deleted file mode 100644
index 3afea5e..0000000
--- a/vendor/github.com/docker/engine-api/client/plugin_push.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build experimental
-
-package client
-
-import (
- "golang.org/x/net/context"
-)
-
-// PluginPush pushes a plugin to a registry
-func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) error {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/plugin_remove.go b/vendor/github.com/docker/engine-api/client/plugin_remove.go
deleted file mode 100644
index 9fe18b5..0000000
--- a/vendor/github.com/docker/engine-api/client/plugin_remove.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build experimental
-
-package client
-
-import (
- "net/url"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// PluginRemove removes a plugin
-func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error {
- query := url.Values{}
- if options.Force {
- query.Set("force", "1")
- }
-
- resp, err := cli.delete(ctx, "/plugins/"+name, query, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/plugin_set.go b/vendor/github.com/docker/engine-api/client/plugin_set.go
deleted file mode 100644
index fb40f38..0000000
--- a/vendor/github.com/docker/engine-api/client/plugin_set.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build experimental
-
-package client
-
-import (
- "golang.org/x/net/context"
-)
-
-// PluginSet modifies settings for an existing plugin
-func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error {
- resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/request.go b/vendor/github.com/docker/engine-api/client/request.go
deleted file mode 100644
index 26e8769..0000000
--- a/vendor/github.com/docker/engine-api/client/request.go
+++ /dev/null
@@ -1,208 +0,0 @@
-package client
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "strings"
-
- "github.com/docker/engine-api/client/transport/cancellable"
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/versions"
- "golang.org/x/net/context"
-)
-
-// serverResponse is a wrapper for http API responses.
-type serverResponse struct {
- body io.ReadCloser
- header http.Header
- statusCode int
-}
-
-// head sends an http request to the docker API using the method HEAD.
-func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
- return cli.sendRequest(ctx, "HEAD", path, query, nil, headers)
-}
-
-// getWithContext sends an http request to the docker API using the method GET with a specific go context.
-func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
- return cli.sendRequest(ctx, "GET", path, query, nil, headers)
-}
-
-// postWithContext sends an http request to the docker API using the method POST with a specific go context.
-func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
- return cli.sendRequest(ctx, "POST", path, query, obj, headers)
-}
-
-func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
- return cli.sendClientRequest(ctx, "POST", path, query, body, headers)
-}
-
-// put sends an http request to the docker API using the method PUT.
-func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
- return cli.sendRequest(ctx, "PUT", path, query, obj, headers)
-}
-
-// put sends an http request to the docker API using the method PUT.
-func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
- return cli.sendClientRequest(ctx, "PUT", path, query, body, headers)
-}
-
-// delete sends an http request to the docker API using the method DELETE.
-func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
- return cli.sendRequest(ctx, "DELETE", path, query, nil, headers)
-}
-
-func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
- var body io.Reader
-
- if obj != nil {
- var err error
- body, err = encodeData(obj)
- if err != nil {
- return serverResponse{}, err
- }
- if headers == nil {
- headers = make(map[string][]string)
- }
- headers["Content-Type"] = []string{"application/json"}
- }
-
- return cli.sendClientRequest(ctx, method, path, query, body, headers)
-}
-
-func (cli *Client) sendClientRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
- serverResp := serverResponse{
- body: nil,
- statusCode: -1,
- }
-
- expectedPayload := (method == "POST" || method == "PUT")
- if expectedPayload && body == nil {
- body = bytes.NewReader([]byte{})
- }
-
- req, err := cli.newRequest(method, path, query, body, headers)
- if err != nil {
- return serverResp, err
- }
-
- if cli.proto == "unix" || cli.proto == "npipe" {
- // For local communications, it doesn't matter what the host is. We just
- // need a valid and meaningful host name. (See #189)
- req.Host = "docker"
- }
- req.URL.Host = cli.addr
- req.URL.Scheme = cli.transport.Scheme()
-
- if expectedPayload && req.Header.Get("Content-Type") == "" {
- req.Header.Set("Content-Type", "text/plain")
- }
-
- resp, err := cancellable.Do(ctx, cli.transport, req)
- if err != nil {
- if !cli.transport.Secure() && strings.Contains(err.Error(), "malformed HTTP response") {
- return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)
- }
-
- if cli.transport.Secure() && strings.Contains(err.Error(), "bad certificate") {
- return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err)
- }
-
- // Don't decorate context sentinel errors; users may be comparing to
- // them directly.
- switch err {
- case context.Canceled, context.DeadlineExceeded:
- return serverResp, err
- }
-
- if err, ok := err.(net.Error); ok {
- if err.Timeout() {
- return serverResp, ErrorConnectionFailed(cli.host)
- }
- if !err.Temporary() {
- if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
- return serverResp, ErrorConnectionFailed(cli.host)
- }
- }
- }
- return serverResp, fmt.Errorf("An error occurred trying to connect: %v", err)
- }
-
- if resp != nil {
- serverResp.statusCode = resp.StatusCode
- }
-
- if serverResp.statusCode < 200 || serverResp.statusCode >= 400 {
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return serverResp, err
- }
- if len(body) == 0 {
- return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL)
- }
-
- var errorMessage string
- if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) &&
- resp.Header.Get("Content-Type") == "application/json" {
- var errorResponse types.ErrorResponse
- if err := json.Unmarshal(body, &errorResponse); err != nil {
- return serverResp, fmt.Errorf("Error reading JSON: %v", err)
- }
- errorMessage = errorResponse.Message
- } else {
- errorMessage = string(body)
- }
-
- return serverResp, fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage))
- }
-
- serverResp.body = resp.Body
- serverResp.header = resp.Header
- return serverResp, nil
-}
-
-func (cli *Client) newRequest(method, path string, query url.Values, body io.Reader, headers map[string][]string) (*http.Request, error) {
- apiPath := cli.getAPIPath(path, query)
- req, err := http.NewRequest(method, apiPath, body)
- if err != nil {
- return nil, err
- }
-
- // Add CLI Config's HTTP Headers BEFORE we set the Docker headers
- // then the user can't change OUR headers
- for k, v := range cli.customHTTPHeaders {
- req.Header.Set(k, v)
- }
-
- if headers != nil {
- for k, v := range headers {
- req.Header[k] = v
- }
- }
-
- return req, nil
-}
-
-func encodeData(data interface{}) (*bytes.Buffer, error) {
- params := bytes.NewBuffer(nil)
- if data != nil {
- if err := json.NewEncoder(params).Encode(data); err != nil {
- return nil, err
- }
- }
- return params, nil
-}
-
-func ensureReaderClosed(response serverResponse) {
- if body := response.body; body != nil {
- // Drain up to 512 bytes and close the body to let the Transport reuse the connection
- io.CopyN(ioutil.Discard, body, 512)
- response.body.Close()
- }
-}
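
The ensureReaderClosed helper at the bottom of request.go drains at most 512 bytes before closing the body so the underlying http.Transport can return the keep-alive connection to its idle pool. The same idiom applies to plain net/http callers; a small stdlib-only sketch (function name and URL are illustrative):

package main

import (
	"io"
	"io/ioutil"
	"log"
	"net/http"
)

// drainAndClose mirrors ensureReaderClosed: read a bounded amount, then close,
// so the http.Transport can reuse the connection.
func drainAndClose(resp *http.Response) {
	if resp == nil || resp.Body == nil {
		return
	}
	io.CopyN(ioutil.Discard, resp.Body, 512)
	resp.Body.Close()
}

func main() {
	resp, err := http.Get("http://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("status:", resp.StatusCode)
	drainAndClose(resp)
}
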
diff --git a/vendor/github.com/docker/engine-api/client/service_create.go b/vendor/github.com/docker/engine-api/client/service_create.go
deleted file mode 100644
index 7349a98..0000000
--- a/vendor/github.com/docker/engine-api/client/service_create.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package client
-
-import (
- "encoding/json"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/swarm"
- "golang.org/x/net/context"
-)
-
-// ServiceCreate creates a new Service.
-func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) {
- var headers map[string][]string
-
- if options.EncodedRegistryAuth != "" {
- headers = map[string][]string{
- "X-Registry-Auth": []string{options.EncodedRegistryAuth},
- }
- }
-
- var response types.ServiceCreateResponse
- resp, err := cli.post(ctx, "/services/create", nil, service, headers)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- ensureReaderClosed(resp)
- return response, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/service_inspect.go b/vendor/github.com/docker/engine-api/client/service_inspect.go
deleted file mode 100644
index 958cd66..0000000
--- a/vendor/github.com/docker/engine-api/client/service_inspect.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package client
-
-import (
- "bytes"
- "encoding/json"
- "io/ioutil"
- "net/http"
-
- "github.com/docker/engine-api/types/swarm"
- "golang.org/x/net/context"
-)
-
-// ServiceInspectWithRaw returns the service information and the raw data.
-func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) {
- serverResp, err := cli.get(ctx, "/services/"+serviceID, nil, nil)
- if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return swarm.Service{}, nil, serviceNotFoundError{serviceID}
- }
- return swarm.Service{}, nil, err
- }
- defer ensureReaderClosed(serverResp)
-
- body, err := ioutil.ReadAll(serverResp.body)
- if err != nil {
- return swarm.Service{}, nil, err
- }
-
- var response swarm.Service
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&response)
- return response, body, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/service_list.go b/vendor/github.com/docker/engine-api/client/service_list.go
deleted file mode 100644
index b48964a..0000000
--- a/vendor/github.com/docker/engine-api/client/service_list.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "net/url"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/filters"
- "github.com/docker/engine-api/types/swarm"
- "golang.org/x/net/context"
-)
-
-// ServiceList returns the list of services.
-func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) {
- query := url.Values{}
-
- if options.Filter.Len() > 0 {
- filterJSON, err := filters.ToParam(options.Filter)
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.get(ctx, "/services", query, nil)
- if err != nil {
- return nil, err
- }
-
- var services []swarm.Service
- err = json.NewDecoder(resp.body).Decode(&services)
- ensureReaderClosed(resp)
- return services, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/service_remove.go b/vendor/github.com/docker/engine-api/client/service_remove.go
deleted file mode 100644
index a9331f9..0000000
--- a/vendor/github.com/docker/engine-api/client/service_remove.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client
-
-import "golang.org/x/net/context"
-
-// ServiceRemove kills and removes a service.
-func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
- resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/service_update.go b/vendor/github.com/docker/engine-api/client/service_update.go
deleted file mode 100644
index ee8b461..0000000
--- a/vendor/github.com/docker/engine-api/client/service_update.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package client
-
-import (
- "net/url"
- "strconv"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/swarm"
- "golang.org/x/net/context"
-)
-
-// ServiceUpdate updates a Service.
-func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) error {
- var (
- headers map[string][]string
- query = url.Values{}
- )
-
- if options.EncodedRegistryAuth != "" {
- headers = map[string][]string{
- "X-Registry-Auth": []string{options.EncodedRegistryAuth},
- }
- }
-
- query.Set("version", strconv.FormatUint(version.Index, 10))
-
- resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/swarm_init.go b/vendor/github.com/docker/engine-api/client/swarm_init.go
deleted file mode 100644
index 68f0a74..0000000
--- a/vendor/github.com/docker/engine-api/client/swarm_init.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client
-
-import (
- "encoding/json"
-
- "github.com/docker/engine-api/types/swarm"
- "golang.org/x/net/context"
-)
-
-// SwarmInit initializes the Swarm.
-func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) {
- serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil)
- if err != nil {
- return "", err
- }
-
- var response string
- err = json.NewDecoder(serverResp.body).Decode(&response)
- ensureReaderClosed(serverResp)
- return response, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/swarm_inspect.go b/vendor/github.com/docker/engine-api/client/swarm_inspect.go
deleted file mode 100644
index d67c7c0..0000000
--- a/vendor/github.com/docker/engine-api/client/swarm_inspect.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client
-
-import (
- "encoding/json"
-
- "github.com/docker/engine-api/types/swarm"
- "golang.org/x/net/context"
-)
-
-// SwarmInspect inspects the Swarm.
-func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) {
- serverResp, err := cli.get(ctx, "/swarm", nil, nil)
- if err != nil {
- return swarm.Swarm{}, err
- }
-
- var response swarm.Swarm
- err = json.NewDecoder(serverResp.body).Decode(&response)
- ensureReaderClosed(serverResp)
- return response, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/swarm_join.go b/vendor/github.com/docker/engine-api/client/swarm_join.go
deleted file mode 100644
index a9b14e0..0000000
--- a/vendor/github.com/docker/engine-api/client/swarm_join.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package client
-
-import (
- "github.com/docker/engine-api/types/swarm"
- "golang.org/x/net/context"
-)
-
-// SwarmJoin joins the Swarm.
-func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error {
- resp, err := cli.post(ctx, "/swarm/join", nil, req, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/swarm_leave.go b/vendor/github.com/docker/engine-api/client/swarm_leave.go
deleted file mode 100644
index a4df732..0000000
--- a/vendor/github.com/docker/engine-api/client/swarm_leave.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package client
-
-import (
- "net/url"
-
- "golang.org/x/net/context"
-)
-
-// SwarmLeave leaves the Swarm.
-func (cli *Client) SwarmLeave(ctx context.Context, force bool) error {
- query := url.Values{}
- if force {
- query.Set("force", "1")
- }
- resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/client/swarm_update.go b/vendor/github.com/docker/engine-api/client/swarm_update.go
deleted file mode 100644
index 5adec81..0000000
--- a/vendor/github.com/docker/engine-api/client/swarm_update.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client
-
-import (
- "fmt"
- "net/url"
- "strconv"
-
- "github.com/docker/engine-api/types/swarm"
- "golang.org/x/net/context"
-)
-
-// SwarmUpdate updates the Swarm.
-func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error {
- query := url.Values{}
- query.Set("version", strconv.FormatUint(version.Index, 10))
- query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken))
- query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken))
- resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil)
- ensureReaderClosed(resp)
- return err
-}
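
SwarmUpdate is an optimistic write: the caller must echo back the cluster's current version index, and the two rotate* flags are sent as query parameters. A hedged sketch that rotates the worker join token, assuming upstream import paths, client.NewEnvClient, and that swarm.Swarm exposes the cluster Version and Spec as it did in this era of the API:

package main

import (
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types/swarm"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Read the current spec and version; the daemon rejects stale version indexes.
	sw, err := cli.SwarmInspect(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Rotate only the worker join token, keeping the spec unchanged.
	flags := swarm.UpdateFlags{RotateWorkerToken: true}
	if err := cli.SwarmUpdate(ctx, sw.Version, sw.Spec, flags); err != nil {
		log.Fatal(err)
	}
}
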
diff --git a/vendor/github.com/docker/engine-api/client/task_inspect.go b/vendor/github.com/docker/engine-api/client/task_inspect.go
deleted file mode 100644
index 3cac888..0000000
--- a/vendor/github.com/docker/engine-api/client/task_inspect.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package client
-
-import (
- "bytes"
- "encoding/json"
- "io/ioutil"
- "net/http"
-
- "github.com/docker/engine-api/types/swarm"
-
- "golang.org/x/net/context"
-)
-
-// TaskInspectWithRaw returns the task information and its raw representation.
-func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
- serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
- if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return swarm.Task{}, nil, taskNotFoundError{taskID}
- }
- return swarm.Task{}, nil, err
- }
- defer ensureReaderClosed(serverResp)
-
- body, err := ioutil.ReadAll(serverResp.body)
- if err != nil {
- return swarm.Task{}, nil, err
- }
-
- var response swarm.Task
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&response)
- return response, body, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/task_list.go b/vendor/github.com/docker/engine-api/client/task_list.go
deleted file mode 100644
index 4604513..0000000
--- a/vendor/github.com/docker/engine-api/client/task_list.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "net/url"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/filters"
- "github.com/docker/engine-api/types/swarm"
- "golang.org/x/net/context"
-)
-
-// TaskList returns the list of tasks.
-func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) {
- query := url.Values{}
-
- if options.Filter.Len() > 0 {
- filterJSON, err := filters.ToParam(options.Filter)
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.get(ctx, "/tasks", query, nil)
- if err != nil {
- return nil, err
- }
-
- var tasks []swarm.Task
- err = json.NewDecoder(resp.body).Decode(&tasks)
- ensureReaderClosed(resp)
- return tasks, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/LICENSE b/vendor/github.com/docker/engine-api/client/transport/cancellable/LICENSE
deleted file mode 100644
index 6a66aea..0000000
--- a/vendor/github.com/docker/engine-api/client/transport/cancellable/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler.go b/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler.go
deleted file mode 100644
index 11dff60..0000000
--- a/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.5
-
-package cancellable
-
-import (
- "net/http"
-
- "github.com/docker/engine-api/client/transport"
-)
-
-func canceler(client transport.Sender, req *http.Request) func() {
- // TODO(djd): Respect any existing value of req.Cancel.
- ch := make(chan struct{})
- req.Cancel = ch
-
- return func() {
- close(ch)
- }
-}
diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go b/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go
deleted file mode 100644
index 8ff2845..0000000
--- a/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.5
-
-package cancellable
-
-import (
- "net/http"
-
- "github.com/docker/engine-api/client/transport"
-)
-
-type requestCanceler interface {
- CancelRequest(*http.Request)
-}
-
-func canceler(client transport.Sender, req *http.Request) func() {
- rc, ok := client.(requestCanceler)
- if !ok {
- return func() {}
- }
- return func() {
- rc.CancelRequest(req)
- }
-}
diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/cancellable.go b/vendor/github.com/docker/engine-api/client/transport/cancellable/cancellable.go
deleted file mode 100644
index 3e4b179..0000000
--- a/vendor/github.com/docker/engine-api/client/transport/cancellable/cancellable.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cancellable provides helper function to cancel http requests.
-package cancellable
-
-import (
- "io"
- "net/http"
- "sync"
-
- "github.com/docker/engine-api/client/transport"
-
- "golang.org/x/net/context"
-)
-
-func nop() {}
-
-var (
- testHookContextDoneBeforeHeaders = nop
- testHookDoReturned = nop
- testHookDidBodyClose = nop
-)
-
-// Do sends an HTTP request with the provided transport.Sender and returns an HTTP response.
-// If the client is nil, http.DefaultClient is used.
-// If the context is canceled or times out, ctx.Err() will be returned.
-//
-// FORK INFORMATION:
-//
-// This function deviates from the upstream version in golang.org/x/net/context/ctxhttp by
-// taking a Sender interface rather than a *http.Client directly. That allows us to use
-// this function with mocked clients and hijacked connections.
-func Do(ctx context.Context, client transport.Sender, req *http.Request) (*http.Response, error) {
- if client == nil {
- client = http.DefaultClient
- }
-
- // Request cancelation changed in Go 1.5, see canceler.go and canceler_go14.go.
- cancel := canceler(client, req)
-
- type responseAndError struct {
- resp *http.Response
- err error
- }
- result := make(chan responseAndError, 1)
-
- go func() {
- resp, err := client.Do(req)
- testHookDoReturned()
- result <- responseAndError{resp, err}
- }()
-
- var resp *http.Response
-
- select {
- case <-ctx.Done():
- testHookContextDoneBeforeHeaders()
- cancel()
- // Clean up after the goroutine calling client.Do:
- go func() {
- if r := <-result; r.resp != nil && r.resp.Body != nil {
- testHookDidBodyClose()
- r.resp.Body.Close()
- }
- }()
- return nil, ctx.Err()
- case r := <-result:
- var err error
- resp, err = r.resp, r.err
- if err != nil {
- return resp, err
- }
- }
-
- c := make(chan struct{})
- go func() {
- select {
- case <-ctx.Done():
- cancel()
- case <-c:
- // The response's Body is closed.
- }
- }()
- resp.Body = ¬ifyingReader{ReadCloser: resp.Body, notify: c}
-
- return resp, nil
-}
-
-// notifyingReader is an io.ReadCloser that closes the notify channel after
-// Close is called or a Read fails on the underlying ReadCloser.
-type notifyingReader struct {
- io.ReadCloser
- notify chan<- struct{}
- notifyOnce sync.Once
-}
-
-func (r *notifyingReader) Read(p []byte) (int, error) {
- n, err := r.ReadCloser.Read(p)
- if err != nil {
- r.notifyOnce.Do(func() {
- close(r.notify)
- })
- }
- return n, err
-}
-
-func (r *notifyingReader) Close() error {
- err := r.ReadCloser.Close()
- r.notifyOnce.Do(func() {
- close(r.notify)
- })
- return err
-}
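
cancellable.Do accepts any transport.Sender (falling back to http.DefaultClient when nil) and returns ctx.Err() if the context is cancelled before the response headers arrive. A minimal sketch using a deadline, assuming the package is importable at its upstream path; the URL is a placeholder:

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/docker/engine-api/client/transport/cancellable"
	"golang.org/x/net/context"
)

func main() {
	req, err := http.NewRequest("GET", "http://example.com/", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Give the request two seconds; Do returns ctx.Err() if the deadline
	// fires before headers arrive.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// A nil Sender falls back to http.DefaultClient, as documented above.
	resp, err := cancellable.Do(ctx, nil, req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}
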
diff --git a/vendor/github.com/docker/engine-api/client/transport/client.go b/vendor/github.com/docker/engine-api/client/transport/client.go
deleted file mode 100644
index 13d4b3a..0000000
--- a/vendor/github.com/docker/engine-api/client/transport/client.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package transport
-
-import (
- "crypto/tls"
- "net/http"
-)
-
-// Sender is an interface that clients must implement
-// to be able to send requests to a remote connection.
-type Sender interface {
- // Do sends request to a remote endpoint.
- Do(*http.Request) (*http.Response, error)
-}
-
-// Client is an interface that abstracts all remote connections.
-type Client interface {
- Sender
- // Secure tells whether the connection is secure or not.
- Secure() bool
- // Scheme returns the connection protocol the client uses.
- Scheme() string
- // TLSConfig returns any TLS configuration the client uses.
- TLSConfig() *tls.Config
-}
-
-// tlsInfo returns information about the TLS configuration.
-type tlsInfo struct {
- tlsConfig *tls.Config
-}
-
-// TLSConfig returns the TLS configuration.
-func (t *tlsInfo) TLSConfig() *tls.Config {
- return t.tlsConfig
-}
-
-// Scheme returns protocol scheme to use.
-func (t *tlsInfo) Scheme() string {
- if t.tlsConfig != nil {
- return "https"
- }
- return "http"
-}
-
-// Secure returns true if there is a TLS configuration.
-func (t *tlsInfo) Secure() bool {
- return t.tlsConfig != nil
-}
diff --git a/vendor/github.com/docker/engine-api/client/transport/tlsconfig_clone.go b/vendor/github.com/docker/engine-api/client/transport/tlsconfig_clone.go
deleted file mode 100644
index 31be0ce..0000000
--- a/vendor/github.com/docker/engine-api/client/transport/tlsconfig_clone.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build !go1.7
-
-package transport
-
-import "crypto/tls"
-
-// TLSConfigClone returns a clone of tls.Config. This function is provided for
-// compatibility for go1.7 that doesn't include this method in stdlib.
-func TLSConfigClone(c *tls.Config) *tls.Config {
- return c.Clone()
-}
diff --git a/vendor/github.com/docker/engine-api/client/transport/tlsconfig_clone_go17.go b/vendor/github.com/docker/engine-api/client/transport/tlsconfig_clone_go17.go
deleted file mode 100644
index a28c914..0000000
--- a/vendor/github.com/docker/engine-api/client/transport/tlsconfig_clone_go17.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// +build go1.7
-
-package transport
-
-import "crypto/tls"
-
-// TLSConfigClone returns a clone of tls.Config. This function is provided for
-// compatibility for go1.7 that doesn't include this method in stdlib.
-func TLSConfigClone(c *tls.Config) *tls.Config {
- return &tls.Config{
- Rand: c.Rand,
- Time: c.Time,
- Certificates: c.Certificates,
- NameToCertificate: c.NameToCertificate,
- GetCertificate: c.GetCertificate,
- RootCAs: c.RootCAs,
- NextProtos: c.NextProtos,
- ServerName: c.ServerName,
- ClientAuth: c.ClientAuth,
- ClientCAs: c.ClientCAs,
- InsecureSkipVerify: c.InsecureSkipVerify,
- CipherSuites: c.CipherSuites,
- PreferServerCipherSuites: c.PreferServerCipherSuites,
- SessionTicketsDisabled: c.SessionTicketsDisabled,
- SessionTicketKey: c.SessionTicketKey,
- ClientSessionCache: c.ClientSessionCache,
- MinVersion: c.MinVersion,
- MaxVersion: c.MaxVersion,
- CurvePreferences: c.CurvePreferences,
- DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
- Renegotiation: c.Renegotiation,
- }
-}
diff --git a/vendor/github.com/docker/engine-api/client/transport/transport.go b/vendor/github.com/docker/engine-api/client/transport/transport.go
deleted file mode 100644
index ff28af1..0000000
--- a/vendor/github.com/docker/engine-api/client/transport/transport.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Package transport provides function to send request to remote endpoints.
-package transport
-
-import (
- "fmt"
- "net/http"
-
- "github.com/docker/go-connections/sockets"
-)
-
-// apiTransport holds information about the http transport to connect with the API.
-type apiTransport struct {
- *http.Client
- *tlsInfo
- transport *http.Transport
-}
-
-// NewTransportWithHTTP creates a new transport based on the provided proto, address and http client.
-// It uses Docker's default http transport configuration if the client is nil.
-// It does not modify the client's transport if it's not nil.
-func NewTransportWithHTTP(proto, addr string, client *http.Client) (Client, error) {
- var transport *http.Transport
-
- if client != nil {
- tr, ok := client.Transport.(*http.Transport)
- if !ok {
- return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport)
- }
- transport = tr
- } else {
- transport = defaultTransport(proto, addr)
- client = &http.Client{
- Transport: transport,
- }
- }
-
- return &apiTransport{
- Client: client,
- tlsInfo: &tlsInfo{transport.TLSClientConfig},
- transport: transport,
- }, nil
-}
-
-// CancelRequest stops a request execution.
-func (a *apiTransport) CancelRequest(req *http.Request) {
- a.transport.CancelRequest(req)
-}
-
-// defaultTransport creates a new http.Transport with Docker's
-// default transport configuration.
-func defaultTransport(proto, addr string) *http.Transport {
- tr := new(http.Transport)
- sockets.ConfigureTransport(tr, proto, addr)
- return tr
-}
-
-var _ Client = &apiTransport{}
diff --git a/vendor/github.com/docker/engine-api/client/version.go b/vendor/github.com/docker/engine-api/client/version.go
deleted file mode 100644
index e037551..0000000
--- a/vendor/github.com/docker/engine-api/client/version.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client
-
-import (
- "encoding/json"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// ServerVersion returns information of the docker client and server host.
-func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) {
- resp, err := cli.get(ctx, "/version", nil, nil)
- if err != nil {
- return types.Version{}, err
- }
-
- var server types.Version
- err = json.NewDecoder(resp.body).Decode(&server)
- ensureReaderClosed(resp)
- return server, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/volume_create.go b/vendor/github.com/docker/engine-api/client/volume_create.go
deleted file mode 100644
index cc1e1c1..0000000
--- a/vendor/github.com/docker/engine-api/client/volume_create.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package client
-
-import (
- "encoding/json"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// VolumeCreate creates a volume in the docker host.
-func (cli *Client) VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) {
- var volume types.Volume
- resp, err := cli.post(ctx, "/volumes/create", nil, options, nil)
- if err != nil {
- return volume, err
- }
- err = json.NewDecoder(resp.body).Decode(&volume)
- ensureReaderClosed(resp)
- return volume, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/volume_inspect.go b/vendor/github.com/docker/engine-api/client/volume_inspect.go
deleted file mode 100644
index 2eaebfa..0000000
--- a/vendor/github.com/docker/engine-api/client/volume_inspect.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package client
-
-import (
- "bytes"
- "encoding/json"
- "io/ioutil"
- "net/http"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// VolumeInspect returns the information about a specific volume in the docker host.
-func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) {
- volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID)
- return volume, err
-}
-
-// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation
-func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) {
- var volume types.Volume
- resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
- if err != nil {
- if resp.statusCode == http.StatusNotFound {
- return volume, nil, volumeNotFoundError{volumeID}
- }
- return volume, nil, err
- }
- defer ensureReaderClosed(resp)
-
- body, err := ioutil.ReadAll(resp.body)
- if err != nil {
- return volume, nil, err
- }
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&volume)
- return volume, body, err
-}
diff --git a/vendor/github.com/docker/engine-api/client/volume_list.go b/vendor/github.com/docker/engine-api/client/volume_list.go
deleted file mode 100644
index 7c6ccf8..0000000
--- a/vendor/github.com/docker/engine-api/client/volume_list.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "net/url"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/filters"
- "golang.org/x/net/context"
-)
-
-// VolumeList returns the volumes configured in the docker host.
-func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) {
- var volumes types.VolumesListResponse
- query := url.Values{}
-
- if filter.Len() > 0 {
- filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
- if err != nil {
- return volumes, err
- }
- query.Set("filters", filterJSON)
- }
- resp, err := cli.get(ctx, "/volumes", query, nil)
- if err != nil {
- return volumes, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&volumes)
- ensureReaderClosed(resp)
- return volumes, err
-}
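
VolumeList serializes the filter with ToParamWithVersion and decodes the reply into types.VolumesListResponse. A usage sketch that lists dangling volumes, assuming upstream import paths, client.NewEnvClient, and that the response exposes a Volumes slice whose entries carry Name and Driver fields (not shown in this diff):

package main

import (
	"fmt"
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types/filters"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// List only volumes that no container references; the filter ends up in
	// the "filters" query parameter, as in volume_list.go above.
	args := filters.NewArgs()
	args.Add("dangling", "true")

	resp, err := cli.VolumeList(context.Background(), args)
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range resp.Volumes {
		fmt.Println(v.Name, v.Driver)
	}
}
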
diff --git a/vendor/github.com/docker/engine-api/client/volume_remove.go b/vendor/github.com/docker/engine-api/client/volume_remove.go
deleted file mode 100644
index 3d5aeff..0000000
--- a/vendor/github.com/docker/engine-api/client/volume_remove.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package client
-
-import (
- "net/url"
-
- "golang.org/x/net/context"
-)
-
-// VolumeRemove removes a volume from the docker host.
-func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error {
- query := url.Values{}
- if force {
- query.Set("force", "1")
- }
- resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/engine-api/types/auth.go b/vendor/github.com/docker/engine-api/types/auth.go
deleted file mode 100644
index 056af6b..0000000
--- a/vendor/github.com/docker/engine-api/types/auth.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package types
-
-// AuthConfig contains authorization information for connecting to a Registry
-type AuthConfig struct {
- Username string `json:"username,omitempty"`
- Password string `json:"password,omitempty"`
- Auth string `json:"auth,omitempty"`
-
- // Email is an optional value associated with the username.
- // This field is deprecated and will be removed in a later
- // version of docker.
- Email string `json:"email,omitempty"`
-
- ServerAddress string `json:"serveraddress,omitempty"`
-
- // IdentityToken is used to authenticate the user and get
- // an access token for the registry.
- IdentityToken string `json:"identitytoken,omitempty"`
-
- // RegistryToken is a bearer token to be sent to a registry
- RegistryToken string `json:"registrytoken,omitempty"`
-}
diff --git a/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go b/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go
deleted file mode 100644
index 931ae10..0000000
--- a/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package blkiodev
-
-import "fmt"
-
-// WeightDevice is a structure that holds device:weight pair
-type WeightDevice struct {
- Path string
- Weight uint16
-}
-
-func (w *WeightDevice) String() string {
- return fmt.Sprintf("%s:%d", w.Path, w.Weight)
-}
-
-// ThrottleDevice is a structure that holds device:rate_per_second pair
-type ThrottleDevice struct {
- Path string
- Rate uint64
-}
-
-func (t *ThrottleDevice) String() string {
- return fmt.Sprintf("%s:%d", t.Path, t.Rate)
-}
diff --git a/vendor/github.com/docker/engine-api/types/client.go b/vendor/github.com/docker/engine-api/types/client.go
deleted file mode 100644
index b9245b0..0000000
--- a/vendor/github.com/docker/engine-api/types/client.go
+++ /dev/null
@@ -1,300 +0,0 @@
-package types
-
-import (
- "bufio"
- "io"
- "net"
-
- "github.com/docker/engine-api/types/container"
- "github.com/docker/engine-api/types/filters"
- "github.com/docker/go-units"
-)
-
-// CheckpointCreateOptions holds parameters to create a checkpoint from a container
-type CheckpointCreateOptions struct {
- CheckpointID string
- Exit bool
-}
-
-// ContainerAttachOptions holds parameters to attach to a container.
-type ContainerAttachOptions struct {
- Stream bool
- Stdin bool
- Stdout bool
- Stderr bool
- DetachKeys string
-}
-
-// ContainerCommitOptions holds parameters to commit changes into a container.
-type ContainerCommitOptions struct {
- Reference string
- Comment string
- Author string
- Changes []string
- Pause bool
- Config *container.Config
-}
-
-// ContainerExecInspect holds information returned by exec inspect.
-type ContainerExecInspect struct {
- ExecID string
- ContainerID string
- Running bool
- ExitCode int
-}
-
-// ContainerListOptions holds parameters to list containers with.
-type ContainerListOptions struct {
- Quiet bool
- Size bool
- All bool
- Latest bool
- Since string
- Before string
- Limit int
- Filter filters.Args
-}
-
-// ContainerLogsOptions holds parameters to filter logs with.
-type ContainerLogsOptions struct {
- ShowStdout bool
- ShowStderr bool
- Since string
- Timestamps bool
- Follow bool
- Tail string
- Details bool
-}
-
-// ContainerRemoveOptions holds parameters to remove containers.
-type ContainerRemoveOptions struct {
- RemoveVolumes bool
- RemoveLinks bool
- Force bool
-}
-
-// ContainerStartOptions holds parameters to start containers.
-type ContainerStartOptions struct {
- CheckpointID string
-}
-
-// CopyToContainerOptions holds information
-// about files to copy into a container
-type CopyToContainerOptions struct {
- AllowOverwriteDirWithFile bool
-}
-
-// EventsOptions hold parameters to filter events with.
-type EventsOptions struct {
- Since string
- Until string
- Filters filters.Args
-}
-
-// NetworkListOptions holds parameters to filter the list of networks with.
-type NetworkListOptions struct {
- Filters filters.Args
-}
-
-// HijackedResponse holds connection information for a hijacked request.
-type HijackedResponse struct {
- Conn net.Conn
- Reader *bufio.Reader
-}
-
-// Close closes the hijacked connection and reader.
-func (h *HijackedResponse) Close() {
- h.Conn.Close()
-}
-
-// CloseWriter is an interface that implements structs
-// that close input streams to prevent from writing.
-type CloseWriter interface {
- CloseWrite() error
-}
-
-// CloseWrite closes a readWriter for writing.
-func (h *HijackedResponse) CloseWrite() error {
- if conn, ok := h.Conn.(CloseWriter); ok {
- return conn.CloseWrite()
- }
- return nil
-}
-
-// ImageBuildOptions holds the information
-// necessary to build images.
-type ImageBuildOptions struct {
- Tags []string
- SuppressOutput bool
- RemoteContext string
- NoCache bool
- Remove bool
- ForceRemove bool
- PullParent bool
- Isolation container.Isolation
- CPUSetCPUs string
- CPUSetMems string
- CPUShares int64
- CPUQuota int64
- CPUPeriod int64
- Memory int64
- MemorySwap int64
- CgroupParent string
- ShmSize int64
- Dockerfile string
- Ulimits []*units.Ulimit
- BuildArgs map[string]string
- AuthConfigs map[string]AuthConfig
- Context io.Reader
- Labels map[string]string
- // squash the resulting image's layers to the parent
- // preserves the original image and creates a new one from the parent with all
- // the changes applied to a single layer
- Squash bool
-}
-
-// ImageBuildResponse holds information
-// returned by a server after building
-// an image.
-type ImageBuildResponse struct {
- Body io.ReadCloser
- OSType string
-}
-
-// ImageCreateOptions holds information to create images.
-type ImageCreateOptions struct {
- RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
-}
-
-// ImageImportSource holds source information for ImageImport
-type ImageImportSource struct {
- Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName)
- SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source)
-}
-
-// ImageImportOptions holds information to import images from the client host.
-type ImageImportOptions struct {
- Tag string // Tag is the name to tag this image with. This attribute is deprecated.
- Message string // Message is the message to tag the image with
- Changes []string // Changes are the raw changes to apply to this image
-}
-
-// ImageListOptions holds parameters to filter the list of images with.
-type ImageListOptions struct {
- MatchName string
- All bool
- Filters filters.Args
-}
-
-// ImageLoadResponse returns information to the client about a load process.
-type ImageLoadResponse struct {
- // Body must be closed to avoid a resource leak
- Body io.ReadCloser
- JSON bool
-}
-
-// ImagePullOptions holds information to pull images.
-type ImagePullOptions struct {
- All bool
- RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
- PrivilegeFunc RequestPrivilegeFunc
-}
-
-// RequestPrivilegeFunc is a function interface that
-// clients can supply to retry operations after
-// getting an authorization error.
-// This function returns the registry authentication
-// header value in base 64 format, or an error
-// if the privilege request fails.
-type RequestPrivilegeFunc func() (string, error)
-
-//ImagePushOptions holds information to push images.
-type ImagePushOptions ImagePullOptions
-
-// ImageRemoveOptions holds parameters to remove images.
-type ImageRemoveOptions struct {
- Force bool
- PruneChildren bool
-}
-
-// ImageSearchOptions holds parameters to search images with.
-type ImageSearchOptions struct {
- RegistryAuth string
- PrivilegeFunc RequestPrivilegeFunc
- Filters filters.Args
- Limit int
-}
-
-// ResizeOptions holds parameters to resize a tty.
-// It can be used to resize container ttys and
-// exec process ttys too.
-type ResizeOptions struct {
- Height int
- Width int
-}
-
-// VersionResponse holds version information for the client and the server
-type VersionResponse struct {
- Client *Version
- Server *Version
-}
-
-// ServerOK returns true when the client could connect to the docker server
-// and parse the information received. It returns false otherwise.
-func (v VersionResponse) ServerOK() bool {
- return v.Server != nil
-}
-
-// NodeListOptions holds parameters to list nodes with.
-type NodeListOptions struct {
- Filter filters.Args
-}
-
-// NodeRemoveOptions holds parameters to remove nodes with.
-type NodeRemoveOptions struct {
- Force bool
-}
-
-// ServiceCreateOptions contains the options to use when creating a service.
-type ServiceCreateOptions struct {
- // EncodedRegistryAuth is the encoded registry authorization credentials to
- // use when updating the service.
- //
- // This field follows the format of the X-Registry-Auth header.
- EncodedRegistryAuth string
-}
-
-// ServiceCreateResponse contains the information returned to a client
-// on the creation of a new service.
-type ServiceCreateResponse struct {
- // ID is the ID of the created service.
- ID string
-}
-
-// ServiceUpdateOptions contains the options to be used for updating services.
-type ServiceUpdateOptions struct {
- // EncodedRegistryAuth is the encoded registry authorization credentials to
- // use when updating the service.
- //
- // This field follows the format of the X-Registry-Auth header.
- EncodedRegistryAuth string
-
- // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
- // into this field. While it does open API users up to racy writes, most
- // users may not need that level of consistency in practice.
-}
-
-// ServiceListOptions holds parameters to list services with.
-type ServiceListOptions struct {
- Filter filters.Args
-}
-
-// TaskListOptions holds parameters to list tasks with.
-type TaskListOptions struct {
- Filter filters.Args
-}
-
-// PluginRemoveOptions holds parameters to remove plugins.
-type PluginRemoveOptions struct {
- Force bool
-}
diff --git a/vendor/github.com/docker/engine-api/types/configs.go b/vendor/github.com/docker/engine-api/types/configs.go
deleted file mode 100644
index c371fa1..0000000
--- a/vendor/github.com/docker/engine-api/types/configs.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package types
-
-import (
- "github.com/docker/engine-api/types/container"
- "github.com/docker/engine-api/types/network"
-)
-
-// configs holds structs used for internal communication between the
-// frontend (such as an http server) and the backend (such as the
-// docker daemon).
-
-// ContainerCreateConfig is the parameter set to ContainerCreate()
-type ContainerCreateConfig struct {
- Name string
- Config *container.Config
- HostConfig *container.HostConfig
- NetworkingConfig *network.NetworkingConfig
- AdjustCPUShares bool
-}
-
-// ContainerRmConfig holds arguments for the container remove
-// operation. This struct is used to tell the backend what operations
-// to perform.
-type ContainerRmConfig struct {
- ForceRemove, RemoveVolume, RemoveLink bool
-}
-
-// ContainerCommitConfig contains build configs for commit operation,
-// and is used when making a commit with the current state of the container.
-type ContainerCommitConfig struct {
- Pause bool
- Repo string
- Tag string
- Author string
- Comment string
- // merge container config into commit config before commit
- MergeConfigs bool
- Config *container.Config
-}
-
-// ExecConfig is a small subset of the Config struct that holds the configuration
-// for the exec feature of docker.
-type ExecConfig struct {
- User string // User that will run the command
- Privileged bool // Is the container in privileged mode
- Tty bool // Attach standard streams to a tty.
- AttachStdin bool // Attach the standard input, makes possible user interaction
- AttachStderr bool // Attach the standard error
- AttachStdout bool // Attach the standard output
- Detach bool // Execute in detach mode
- DetachKeys string // Escape keys for detach
- Env []string // Environment variables
- Cmd []string // Execution commands and args
-}
-
-// PluginRmConfig holds arguments for the plugin remove
-// operation. This struct is used to tell the backend what operations
-// to perform.
-type PluginRmConfig struct {
- ForceRemove bool
-}
diff --git a/vendor/github.com/docker/engine-api/types/container/config.go b/vendor/github.com/docker/engine-api/types/container/config.go
deleted file mode 100644
index e300e11..0000000
--- a/vendor/github.com/docker/engine-api/types/container/config.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package container
-
-import (
- "time"
-
- "github.com/docker/engine-api/types/strslice"
- "github.com/docker/go-connections/nat"
-)
-
-// HealthConfig holds configuration settings for the HEALTHCHECK feature.
-type HealthConfig struct {
- // Test is the test to perform to check that the container is healthy.
- // An empty slice means to inherit the default.
- // The options are:
- // {} : inherit healthcheck
- // {"NONE"} : disable healthcheck
- // {"CMD", args...} : exec arguments directly
- // {"CMD-SHELL", command} : run command with system's default shell
- Test []string `json:",omitempty"`
-
- // Zero means to inherit. Durations are expressed as integer nanoseconds.
- Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
- Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
-
- // Retries is the number of consecutive failures needed to consider a container as unhealthy.
- // Zero means inherit.
- Retries int `json:",omitempty"`
-}
-
-// Config contains the configuration data about a container.
-// It should hold only portable information about the container.
-// Here, "portable" means "independent from the host we are running on".
-// Non-portable information *should* appear in HostConfig.
-// All fields added to this struct must be marked `omitempty` to keep getting
-// predictable hashes from the old `v1Compatibility` configuration.
-type Config struct {
- Hostname string // Hostname
- Domainname string // Domainname
- User string // User that will run the command(s) inside the container, also support user:group
- AttachStdin bool // Attach the standard input, makes possible user interaction
- AttachStdout bool // Attach the standard output
- AttachStderr bool // Attach the standard error
- ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports
- Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
- OpenStdin bool // Open stdin
- StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
- Env []string // List of environment variable to set in the container
- Cmd strslice.StrSlice // Command to run when starting the container
- Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
- ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
- Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
- Volumes map[string]struct{} // List of volumes (mounts) used for the container
- WorkingDir string // Current directory (PWD) in which the command will be launched
- Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
- NetworkDisabled bool `json:",omitempty"` // Is network disabled
- MacAddress string `json:",omitempty"` // Mac Address of the container
- OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
- Labels map[string]string // List of labels set to this container
- StopSignal string `json:",omitempty"` // Signal to stop a container
- StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
- Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
-}
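
For orientation while reviewing the removal, here is a minimal, illustrative construction of the Config and HealthConfig types from the file above (the image name, command, environment and check URL are invented; it assumes the engine-api packages remain importable after un-vendoring):

package main

import (
	"fmt"
	"time"

	"github.com/docker/engine-api/types/container"
	"github.com/docker/engine-api/types/strslice"
)

func main() {
	cfg := container.Config{
		Image:  "nginx:1.11",
		Cmd:    strslice.StrSlice{"nginx", "-g", "daemon off;"},
		Env:    []string{"NGINX_PORT=8080"},
		Labels: map[string]string{"app": "web"},
		Healthcheck: &container.HealthConfig{
			// {"CMD-SHELL", command} runs the check through the default shell.
			Test:     []string{"CMD-SHELL", "curl -f http://localhost:8080/ || exit 1"},
			Interval: 30 * time.Second,
			Timeout:  5 * time.Second,
			Retries:  3,
		},
	}
	fmt.Printf("%+v\n", cfg)
}
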
diff --git a/vendor/github.com/docker/engine-api/types/container/host_config.go b/vendor/github.com/docker/engine-api/types/container/host_config.go
deleted file mode 100644
index 5a4069a..0000000
--- a/vendor/github.com/docker/engine-api/types/container/host_config.go
+++ /dev/null
@@ -1,324 +0,0 @@
-package container
-
-import (
- "strings"
-
- "github.com/docker/engine-api/types/blkiodev"
- "github.com/docker/engine-api/types/mount"
- "github.com/docker/engine-api/types/strslice"
- "github.com/docker/go-connections/nat"
- "github.com/docker/go-units"
-)
-
-// NetworkMode represents the container network stack.
-type NetworkMode string
-
-// Isolation represents the isolation technology of a container. The supported
-// values are platform specific
-type Isolation string
-
-// IsDefault indicates the default isolation technology of a container. On Linux this
-// is the native driver. On Windows, this is a Windows Server Container.
-func (i Isolation) IsDefault() bool {
- return strings.ToLower(string(i)) == "default" || string(i) == ""
-}
-
-// IpcMode represents the container ipc stack.
-type IpcMode string
-
-// IsPrivate indicates whether the container uses its private ipc stack.
-func (n IpcMode) IsPrivate() bool {
- return !(n.IsHost() || n.IsContainer())
-}
-
-// IsHost indicates whether the container uses the host's ipc stack.
-func (n IpcMode) IsHost() bool {
- return n == "host"
-}
-
-// IsContainer indicates whether the container uses a container's ipc stack.
-func (n IpcMode) IsContainer() bool {
- parts := strings.SplitN(string(n), ":", 2)
- return len(parts) > 1 && parts[0] == "container"
-}
-
-// Valid indicates whether the ipc stack is valid.
-func (n IpcMode) Valid() bool {
- parts := strings.Split(string(n), ":")
- switch mode := parts[0]; mode {
- case "", "host":
- case "container":
- if len(parts) != 2 || parts[1] == "" {
- return false
- }
- default:
- return false
- }
- return true
-}
-
-// Container returns the name of the container whose ipc stack is going to be used.
-func (n IpcMode) Container() string {
- parts := strings.SplitN(string(n), ":", 2)
- if len(parts) > 1 {
- return parts[1]
- }
- return ""
-}
-
-// UsernsMode represents userns mode in the container.
-type UsernsMode string
-
-// IsHost indicates whether the container uses the host's userns.
-func (n UsernsMode) IsHost() bool {
- return n == "host"
-}
-
-// IsPrivate indicates whether the container uses a private userns.
-func (n UsernsMode) IsPrivate() bool {
- return !(n.IsHost())
-}
-
-// Valid indicates whether the userns is valid.
-func (n UsernsMode) Valid() bool {
- parts := strings.Split(string(n), ":")
- switch mode := parts[0]; mode {
- case "", "host":
- default:
- return false
- }
- return true
-}
-
-// CgroupSpec represents the cgroup to use for the container.
-type CgroupSpec string
-
-// IsContainer indicates whether the container is using another container cgroup
-func (c CgroupSpec) IsContainer() bool {
- parts := strings.SplitN(string(c), ":", 2)
- return len(parts) > 1 && parts[0] == "container"
-}
-
-// Valid indicates whether the cgroup spec is valid.
-func (c CgroupSpec) Valid() bool {
- return c.IsContainer() || c == ""
-}
-
-// Container returns the name of the container whose cgroup will be used.
-func (c CgroupSpec) Container() string {
- parts := strings.SplitN(string(c), ":", 2)
- if len(parts) > 1 {
- return parts[1]
- }
- return ""
-}
-
-// UTSMode represents the UTS namespace of the container.
-type UTSMode string
-
-// IsPrivate indicates whether the container uses its private UTS namespace.
-func (n UTSMode) IsPrivate() bool {
- return !(n.IsHost())
-}
-
-// IsHost indicates whether the container uses the host's UTS namespace.
-func (n UTSMode) IsHost() bool {
- return n == "host"
-}
-
-// Valid indicates whether the UTS namespace is valid.
-func (n UTSMode) Valid() bool {
- parts := strings.Split(string(n), ":")
- switch mode := parts[0]; mode {
- case "", "host":
- default:
- return false
- }
- return true
-}
-
-// PidMode represents the pid namespace of the container.
-type PidMode string
-
-// IsPrivate indicates whether the container uses its own new pid namespace.
-func (n PidMode) IsPrivate() bool {
- return !(n.IsHost() || n.IsContainer())
-}
-
-// IsHost indicates whether the container uses the host's pid namespace.
-func (n PidMode) IsHost() bool {
- return n == "host"
-}
-
-// IsContainer indicates whether the container uses a container's pid namespace.
-func (n PidMode) IsContainer() bool {
- parts := strings.SplitN(string(n), ":", 2)
- return len(parts) > 1 && parts[0] == "container"
-}
-
-// Valid indicates whether the pid namespace is valid.
-func (n PidMode) Valid() bool {
- parts := strings.Split(string(n), ":")
- switch mode := parts[0]; mode {
- case "", "host":
- case "container":
- if len(parts) != 2 || parts[1] == "" {
- return false
- }
- default:
- return false
- }
- return true
-}
-
-// Container returns the name of the container whose pid namespace is going to be used.
-func (n PidMode) Container() string {
- parts := strings.SplitN(string(n), ":", 2)
- if len(parts) > 1 {
- return parts[1]
- }
- return ""
-}
-
-// DeviceMapping represents the device mapping between the host and the container.
-type DeviceMapping struct {
- PathOnHost string
- PathInContainer string
- CgroupPermissions string
-}
-
-// RestartPolicy represents the restart policies of the container.
-type RestartPolicy struct {
- Name string
- MaximumRetryCount int
-}
-
-// IsNone indicates whether the container has the "no" restart policy.
-// This means the container will not automatically restart when exiting.
-func (rp *RestartPolicy) IsNone() bool {
- return rp.Name == "no" || rp.Name == ""
-}
-
-// IsAlways indicates whether the container has the "always" restart policy.
-// This means the container will automatically restart regardless of the exit status.
-func (rp *RestartPolicy) IsAlways() bool {
- return rp.Name == "always"
-}
-
-// IsOnFailure indicates whether the container has the "on-failure" restart policy.
-// This means the container will automatically restart if it exits with a non-zero exit status.
-func (rp *RestartPolicy) IsOnFailure() bool {
- return rp.Name == "on-failure"
-}
-
-// IsUnlessStopped indicates whether the container has the
-// "unless-stopped" restart policy. This means the container will
-// automatically restart unless the user has put it into a stopped state.
-func (rp *RestartPolicy) IsUnlessStopped() bool {
- return rp.Name == "unless-stopped"
-}
-
-// IsSame compares two RestartPolicy to see if they are the same
-func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
- return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
-}
-
-// LogConfig represents the logging configuration of the container.
-type LogConfig struct {
- Type string
- Config map[string]string
-}
-
-// Resources contains container's resources (cgroups config, ulimits...)
-type Resources struct {
- // Applicable to all platforms
- CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
- Memory int64 // Memory limit (in bytes)
-
- // Applicable to UNIX platforms
- CgroupParent string // Parent cgroup.
- BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
- BlkioWeightDevice []*blkiodev.WeightDevice
- BlkioDeviceReadBps []*blkiodev.ThrottleDevice
- BlkioDeviceWriteBps []*blkiodev.ThrottleDevice
- BlkioDeviceReadIOps []*blkiodev.ThrottleDevice
- BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
- CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
- CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
- CpusetCpus string // CpusetCpus 0-2, 0,1
- CpusetMems string // CpusetMems 0-2, 0,1
- Devices []DeviceMapping // List of devices to map inside the container
- DiskQuota int64 // Disk limit (in bytes)
- KernelMemory int64 // Kernel memory limit (in bytes)
- MemoryReservation int64 // Memory soft limit (in bytes)
- MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
- MemorySwappiness *int64 // Tuning container memory swappiness behaviour
- OomKillDisable *bool // Whether to disable OOM Killer or not
- PidsLimit int64 // Setting pids limit for a container
- Ulimits []*units.Ulimit // List of ulimits to be set in the container
-
- // Applicable to Windows
- CPUCount int64 `json:"CpuCount"` // CPU count
- CPUPercent int64 `json:"CpuPercent"` // CPU percent
- IOMaximumIOps uint64 // Maximum IOps for the container system drive
- IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
-}
-
-// UpdateConfig holds the mutable attributes of a Container.
-// Those attributes can be updated at runtime.
-type UpdateConfig struct {
- // Contains container's resources (cgroups, ulimits)
- Resources
- RestartPolicy RestartPolicy
-}
-
-// HostConfig is the non-portable Config structure of a container.
-// Here, "non-portable" means "dependent of the host we are running on".
-// Portable information *should* appear in Config.
-type HostConfig struct {
- // Applicable to all platforms
- Binds []string // List of volume bindings for this container
- ContainerIDFile string // File (path) where the containerId is written
- LogConfig LogConfig // Configuration of the logs for this container
- NetworkMode NetworkMode // Network mode to use for the container
- PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
- RestartPolicy RestartPolicy // Restart policy to be used for the container
- AutoRemove bool // Automatically remove container when it exits
- VolumeDriver string // Name of the volume driver used to mount volumes
- VolumesFrom []string // List of volumes to take from other container
-
- // Applicable to UNIX platforms
- CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
- CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
- DNS []string `json:"Dns"` // List of DNS server to lookup
- DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
- DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
- ExtraHosts []string // List of extra hosts
- GroupAdd []string // List of additional groups that the container process will run as
- IpcMode IpcMode // IPC namespace to use for the container
- Cgroup CgroupSpec // Cgroup to use for the container
- Links []string // List of links (in the name:alias form)
- OomScoreAdj int // Container preference for OOM-killing
- PidMode PidMode // PID namespace to use for the container
- Privileged bool // Is the container in privileged mode
- PublishAllPorts bool // Should docker publish all exposed port for the container
- ReadonlyRootfs bool // Is the container root filesystem in read-only
- SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
- StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container.
- Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
- UTSMode UTSMode // UTS namespace to use for the container
- UsernsMode UsernsMode // The user namespace to use for the container
- ShmSize int64 // Total shm memory usage
- Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
- Runtime string `json:",omitempty"` // Runtime to use with this container
-
- // Applicable to Windows
- ConsoleSize [2]int // Initial console size
- Isolation Isolation // Isolation technology of the container (eg default, hyperv)
-
- // Contains container's resources (cgroups, ulimits)
- Resources
-
- // Mounts specs used by the container
- Mounts []mount.Mount `json:",omitempty"`
-}
diff --git a/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go b/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go
deleted file mode 100644
index 4171059..0000000
--- a/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// +build !windows
-
-package container
-
-import "strings"
-
-// IsValid indicates if an isolation technology is valid
-func (i Isolation) IsValid() bool {
- return i.IsDefault()
-}
-
-// IsPrivate indicates whether the container uses its private network stack.
-func (n NetworkMode) IsPrivate() bool {
- return !(n.IsHost() || n.IsContainer())
-}
-
-// IsDefault indicates whether container uses the default network stack.
-func (n NetworkMode) IsDefault() bool {
- return n == "default"
-}
-
-// NetworkName returns the name of the network stack.
-func (n NetworkMode) NetworkName() string {
- if n.IsBridge() {
- return "bridge"
- } else if n.IsHost() {
- return "host"
- } else if n.IsContainer() {
- return "container"
- } else if n.IsNone() {
- return "none"
- } else if n.IsDefault() {
- return "default"
- } else if n.IsUserDefined() {
- return n.UserDefined()
- }
- return ""
-}
-
-// IsBridge indicates whether container uses the bridge network stack
-func (n NetworkMode) IsBridge() bool {
- return n == "bridge"
-}
-
-// IsHost indicates whether container uses the host network stack.
-func (n NetworkMode) IsHost() bool {
- return n == "host"
-}
-
-// IsContainer indicates whether container uses a container network stack.
-func (n NetworkMode) IsContainer() bool {
- parts := strings.SplitN(string(n), ":", 2)
- return len(parts) > 1 && parts[0] == "container"
-}
-
-// IsNone indicates whether container isn't using a network stack.
-func (n NetworkMode) IsNone() bool {
- return n == "none"
-}
-
-// ConnectedContainer is the id of the container whose network this container is connected to.
-func (n NetworkMode) ConnectedContainer() string {
- parts := strings.SplitN(string(n), ":", 2)
- if len(parts) > 1 {
- return parts[1]
- }
- return ""
-}
-
-// IsUserDefined indicates user-created network
-func (n NetworkMode) IsUserDefined() bool {
- return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
-}
-
-// UserDefined returns the name of the user-created network, or "" if the mode is not user-defined.
-func (n NetworkMode) UserDefined() string {
- if n.IsUserDefined() {
- return string(n)
- }
- return ""
-}
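
As a quick, non-authoritative sketch of how the NetworkMode helpers above compose on Unix (the mode strings are hypothetical):

package main

import (
	"fmt"

	"github.com/docker/engine-api/types/container"
)

func main() {
	modes := []container.NetworkMode{"bridge", "host", "container:web", "my-overlay"}
	for _, m := range modes {
		// NetworkName resolves the well-known modes and falls back to the
		// user-defined network name.
		fmt.Printf("%-14s -> %s\n", m, m.NetworkName())
	}

	// Container-scoped modes expose the peer container's name or ID.
	m := container.NetworkMode("container:web")
	fmt.Println(m.IsContainer(), m.ConnectedContainer()) // true web
}
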
diff --git a/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go b/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go
deleted file mode 100644
index 0ee332b..0000000
--- a/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package container
-
-import (
- "strings"
-)
-
-// IsDefault indicates whether container uses the default network stack.
-func (n NetworkMode) IsDefault() bool {
- return n == "default"
-}
-
-// IsNone indicates whether container isn't using a network stack.
-func (n NetworkMode) IsNone() bool {
- return n == "none"
-}
-
-// IsContainer indicates whether container uses a container network stack.
-// Returns false as windows doesn't support this mode
-func (n NetworkMode) IsContainer() bool {
- return false
-}
-
-// IsBridge indicates whether container uses the bridge network stack
-// in windows it is given the name NAT
-func (n NetworkMode) IsBridge() bool {
- return n == "nat"
-}
-
-// IsHost indicates whether container uses the host network stack.
-// returns false as this is not supported by windows
-func (n NetworkMode) IsHost() bool {
- return false
-}
-
-// IsPrivate indicates whether container uses its private network stack.
-func (n NetworkMode) IsPrivate() bool {
- return !(n.IsHost() || n.IsContainer())
-}
-
-// ConnectedContainer is the id of the container whose network this container is connected to.
-// Returns blank string on windows
-func (n NetworkMode) ConnectedContainer() string {
- return ""
-}
-
-// IsUserDefined indicates user-created network
-func (n NetworkMode) IsUserDefined() bool {
- return !n.IsDefault() && !n.IsNone() && !n.IsBridge()
-}
-
-// IsHyperV indicates the use of a Hyper-V partition for isolation
-func (i Isolation) IsHyperV() bool {
- return strings.ToLower(string(i)) == "hyperv"
-}
-
-// IsProcess indicates the use of process isolation
-func (i Isolation) IsProcess() bool {
- return strings.ToLower(string(i)) == "process"
-}
-
-// IsValid indicates if an isolation technology is valid
-func (i Isolation) IsValid() bool {
- return i.IsDefault() || i.IsHyperV() || i.IsProcess()
-}
-
-// NetworkName returns the name of the network stack.
-func (n NetworkMode) NetworkName() string {
- if n.IsDefault() {
- return "default"
- } else if n.IsBridge() {
- return "nat"
- } else if n.IsNone() {
- return "none"
- } else if n.IsUserDefined() {
- return n.UserDefined()
- }
-
- return ""
-}
-
-// UserDefined returns the name of the user-created network, or "" if the mode is not user-defined.
-func (n NetworkMode) UserDefined() string {
- if n.IsUserDefined() {
- return string(n)
- }
- return ""
-}
diff --git a/vendor/github.com/docker/engine-api/types/errors.go b/vendor/github.com/docker/engine-api/types/errors.go
deleted file mode 100644
index 649ab95..0000000
--- a/vendor/github.com/docker/engine-api/types/errors.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package types
-
-// ErrorResponse is the response body of API errors.
-type ErrorResponse struct {
- Message string `json:"message"`
-}
diff --git a/vendor/github.com/docker/engine-api/types/filters/parse.go b/vendor/github.com/docker/engine-api/types/filters/parse.go
deleted file mode 100644
index dc2c48b..0000000
--- a/vendor/github.com/docker/engine-api/types/filters/parse.go
+++ /dev/null
@@ -1,307 +0,0 @@
-// Package filters provides helper function to parse and handle command line
-// filter, used for example in docker ps or docker images commands.
-package filters
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "regexp"
- "strings"
-
- "github.com/docker/engine-api/types/versions"
-)
-
-// Args stores filter arguments as map key:{map key: bool}.
-// It contains an aggregation of the map of arguments (which are in the form
-// of -f 'key=value') based on the key, and stores values for the same key
-// in a map with string keys and boolean values.
-// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
-// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
-type Args struct {
- fields map[string]map[string]bool
-}
-
-// NewArgs initializes a new Args struct.
-func NewArgs() Args {
- return Args{fields: map[string]map[string]bool{}}
-}
-
-// ParseFlag parses the argument to the filter flag. Like
-//
-// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
-//
-// If prev map is provided, then it is appended to, and returned. By default a new
-// map is created.
-func ParseFlag(arg string, prev Args) (Args, error) {
- filters := prev
- if len(arg) == 0 {
- return filters, nil
- }
-
- if !strings.Contains(arg, "=") {
- return filters, ErrBadFormat
- }
-
- f := strings.SplitN(arg, "=", 2)
-
- name := strings.ToLower(strings.TrimSpace(f[0]))
- value := strings.TrimSpace(f[1])
-
- filters.Add(name, value)
-
- return filters, nil
-}
-
-// ErrBadFormat is an error returned in case of bad format for a filter.
-var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
-
-// ToParam packs the Args into a string for easy transport from client to server.
-func ToParam(a Args) (string, error) {
- // this way we don't URL encode {}, just empty space
- if a.Len() == 0 {
- return "", nil
- }
-
- buf, err := json.Marshal(a.fields)
- if err != nil {
- return "", err
- }
- return string(buf), nil
-}
-
-// ToParamWithVersion packs the Args into a string for easy transport from client to server.
-// The generated string will depend on the specified version (corresponding to the API version).
-func ToParamWithVersion(version string, a Args) (string, error) {
- // this way we don't URL encode {}, just empty space
- if a.Len() == 0 {
- return "", nil
- }
-
- // for daemons older than v1.10, filter must be of the form map[string][]string
- buf := []byte{}
- err := errors.New("")
- if version != "" && versions.LessThan(version, "1.22") {
- buf, err = json.Marshal(convertArgsToSlice(a.fields))
- } else {
- buf, err = json.Marshal(a.fields)
- }
- if err != nil {
- return "", err
- }
- return string(buf), nil
-}
-
-// FromParam unpacks the filter Args.
-func FromParam(p string) (Args, error) {
- if len(p) == 0 {
- return NewArgs(), nil
- }
-
- r := strings.NewReader(p)
- d := json.NewDecoder(r)
-
- m := map[string]map[string]bool{}
- if err := d.Decode(&m); err != nil {
- r.Seek(0, 0)
-
- // Allow parsing old arguments in slice format.
- // Because other libraries might be sending them in this format.
- deprecated := map[string][]string{}
- if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
- m = deprecatedArgs(deprecated)
- } else {
- return NewArgs(), err
- }
- }
- return Args{m}, nil
-}
-
-// Get returns the list of values associated with a field.
-// It returns a slice of strings to keep backwards compatibility with old code.
-func (filters Args) Get(field string) []string {
- values := filters.fields[field]
- if values == nil {
- return make([]string, 0)
- }
- slice := make([]string, 0, len(values))
- for key := range values {
- slice = append(slice, key)
- }
- return slice
-}
-
-// Add adds a new value to a filter field.
-func (filters Args) Add(name, value string) {
- if _, ok := filters.fields[name]; ok {
- filters.fields[name][value] = true
- } else {
- filters.fields[name] = map[string]bool{value: true}
- }
-}
-
-// Del removes a value from a filter field.
-func (filters Args) Del(name, value string) {
- if _, ok := filters.fields[name]; ok {
- delete(filters.fields[name], value)
- }
-}
-
-// Len returns the number of fields in the arguments.
-func (filters Args) Len() int {
- return len(filters.fields)
-}
-
-// MatchKVList returns true if the values for the specified field match the ones
-// from the sources.
-// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
-// field is 'label' and sources are {'label1': '1', 'label2': '2'}
-// it returns true.
-func (filters Args) MatchKVList(field string, sources map[string]string) bool {
- fieldValues := filters.fields[field]
-
- //do not filter if there is no filter set or cannot determine filter
- if len(fieldValues) == 0 {
- return true
- }
-
- if sources == nil || len(sources) == 0 {
- return false
- }
-
- for name2match := range fieldValues {
- testKV := strings.SplitN(name2match, "=", 2)
-
- v, ok := sources[testKV[0]]
- if !ok {
- return false
- }
- if len(testKV) == 2 && testKV[1] != v {
- return false
- }
- }
-
- return true
-}
-
-// Match returns true if any of the values for the specified field matches the source string
-// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
-// field is 'image.name' and source is 'ubuntu'
-// it returns true.
-func (filters Args) Match(field, source string) bool {
- if filters.ExactMatch(field, source) {
- return true
- }
-
- fieldValues := filters.fields[field]
- for name2match := range fieldValues {
- match, err := regexp.MatchString(name2match, source)
- if err != nil {
- continue
- }
- if match {
- return true
- }
- }
- return false
-}
-
-// ExactMatch returns true if the source matches exactly one of the filters.
-func (filters Args) ExactMatch(field, source string) bool {
- fieldValues, ok := filters.fields[field]
- //do not filter if there is no filter set or cannot determine filter
- if !ok || len(fieldValues) == 0 {
- return true
- }
-
- // try to match full name value to avoid O(N) regular expression matching
- return fieldValues[source]
-}
-
-// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
-func (filters Args) UniqueExactMatch(field, source string) bool {
- fieldValues := filters.fields[field]
- //do not filter if there is no filter set or cannot determine filter
- if len(fieldValues) == 0 {
- return true
- }
- if len(filters.fields[field]) != 1 {
- return false
- }
-
- // try to match full name value to avoid O(N) regular expression matching
- return fieldValues[source]
-}
-
-// FuzzyMatch returns true if the source matches exactly one of the filters,
-// or the source has one of the filters as a prefix.
-func (filters Args) FuzzyMatch(field, source string) bool {
- if filters.ExactMatch(field, source) {
- return true
- }
-
- fieldValues := filters.fields[field]
- for prefix := range fieldValues {
- if strings.HasPrefix(source, prefix) {
- return true
- }
- }
- return false
-}
-
-// Include returns true if the name of the field to filter is in the filters.
-func (filters Args) Include(field string) bool {
- _, ok := filters.fields[field]
- return ok
-}
-
-// Validate ensures that all the fields in the filter are valid.
-// It returns an error as soon as it finds an invalid field.
-func (filters Args) Validate(accepted map[string]bool) error {
- for name := range filters.fields {
- if !accepted[name] {
- return fmt.Errorf("Invalid filter '%s'", name)
- }
- }
- return nil
-}
-
-// WalkValues iterates over the list of filtered values for a field.
-// It stops the iteration if it finds an error and it returns that error.
-func (filters Args) WalkValues(field string, op func(value string) error) error {
- if _, ok := filters.fields[field]; !ok {
- return nil
- }
- for v := range filters.fields[field] {
- if err := op(v); err != nil {
- return err
- }
- }
- return nil
-}
-
-func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
- m := map[string]map[string]bool{}
- for k, v := range d {
- values := map[string]bool{}
- for _, vv := range v {
- values[vv] = true
- }
- m[k] = values
- }
- return m
-}
-
-func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
- m := map[string][]string{}
- for k, v := range f {
- values := []string{}
- for kk := range v {
- if v[kk] {
- values = append(values, kk)
- }
- }
- m[k] = values
- }
- return m
-}
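
A short usage sketch of the filters helpers removed above, exercising only functions defined in that file (the filter keys and values are made up):

package main

import (
	"fmt"

	"github.com/docker/engine-api/types/filters"
)

func main() {
	// Aggregate -f style arguments into an Args value.
	args := filters.NewArgs()
	args.Add("label", "env=prod")
	args.Add("label", "team=infra")
	args.Add("image.name", "ubuntu")

	// Pack the filters for transport to the daemon, then unpack them again.
	param, err := filters.ToParam(args)
	if err != nil {
		panic(err)
	}
	roundTripped, err := filters.FromParam(param)
	if err != nil {
		panic(err)
	}

	// Match compares a field against its stored values (exact or regexp);
	// MatchKVList checks key=value filters against a label map.
	fmt.Println(roundTripped.Match("image.name", "ubuntu")) // true
	fmt.Println(roundTripped.MatchKVList("label", map[string]string{
		"env":  "prod",
		"team": "infra",
	})) // true
}
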
diff --git a/vendor/github.com/docker/engine-api/types/mount/mount.go b/vendor/github.com/docker/engine-api/types/mount/mount.go
deleted file mode 100644
index 5516ed0..0000000
--- a/vendor/github.com/docker/engine-api/types/mount/mount.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package mount
-
-// Type represents the type of a mount.
-type Type string
-
-const (
- // TypeBind BIND
- TypeBind Type = "bind"
- // TypeVolume VOLUME
- TypeVolume Type = "volume"
-)
-
-// Mount represents a mount (volume).
-type Mount struct {
- Type Type `json:",omitempty"`
- Source string `json:",omitempty"`
- Target string `json:",omitempty"`
- ReadOnly bool `json:",omitempty"`
-
- BindOptions *BindOptions `json:",omitempty"`
- VolumeOptions *VolumeOptions `json:",omitempty"`
-}
-
-// Propagation represents the propagation of a mount.
-type Propagation string
-
-const (
- // PropagationRPrivate RPRIVATE
- PropagationRPrivate Propagation = "rprivate"
- // PropagationPrivate PRIVATE
- PropagationPrivate Propagation = "private"
- // PropagationRShared RSHARED
- PropagationRShared Propagation = "rshared"
- // PropagationShared SHARED
- PropagationShared Propagation = "shared"
- // PropagationRSlave RSLAVE
- PropagationRSlave Propagation = "rslave"
- // PropagationSlave SLAVE
- PropagationSlave Propagation = "slave"
-)
-
-// BindOptions defines options specific to mounts of type "bind".
-type BindOptions struct {
- Propagation Propagation `json:",omitempty"`
-}
-
-// VolumeOptions represents the options for a mount of type volume.
-type VolumeOptions struct {
- NoCopy bool `json:",omitempty"`
- Labels map[string]string `json:",omitempty"`
- DriverConfig *Driver `json:",omitempty"`
-}
-
-// Driver represents a volume driver.
-type Driver struct {
- Name string `json:",omitempty"`
- Options map[string]string `json:",omitempty"`
-}
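
The mount types above are plain data structs; a hedged example of a bind mount and a named volume follows (paths, labels and the "local" driver name are illustrative values):

package main

import (
	"fmt"

	"github.com/docker/engine-api/types/mount"
)

func main() {
	mounts := []mount.Mount{
		{
			// Host path bind-mounted read-only with recursive-private propagation.
			Type:     mount.TypeBind,
			Source:   "/srv/www",
			Target:   "/usr/share/nginx/html",
			ReadOnly: true,
			BindOptions: &mount.BindOptions{
				Propagation: mount.PropagationRPrivate,
			},
		},
		{
			// Named volume with driver configuration and labels.
			Type:   mount.TypeVolume,
			Source: "webdata",
			Target: "/data",
			VolumeOptions: &mount.VolumeOptions{
				Labels:       map[string]string{"backup": "daily"},
				DriverConfig: &mount.Driver{Name: "local"},
			},
		},
	}
	fmt.Printf("%+v\n", mounts)
}
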
diff --git a/vendor/github.com/docker/engine-api/types/network/network.go b/vendor/github.com/docker/engine-api/types/network/network.go
deleted file mode 100644
index 47080b6..0000000
--- a/vendor/github.com/docker/engine-api/types/network/network.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package network
-
-// Address represents an IP address
-type Address struct {
- Addr string
- PrefixLen int
-}
-
-// IPAM represents IP Address Management
-type IPAM struct {
- Driver string
- Options map[string]string //Per network IPAM driver options
- Config []IPAMConfig
-}
-
-// IPAMConfig represents IPAM configurations
-type IPAMConfig struct {
- Subnet string `json:",omitempty"`
- IPRange string `json:",omitempty"`
- Gateway string `json:",omitempty"`
- AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
-}
-
-// EndpointIPAMConfig represents IPAM configurations for the endpoint
-type EndpointIPAMConfig struct {
- IPv4Address string `json:",omitempty"`
- IPv6Address string `json:",omitempty"`
- LinkLocalIPs []string `json:",omitempty"`
-}
-
-// EndpointSettings stores the network endpoint details
-type EndpointSettings struct {
- // Configurations
- IPAMConfig *EndpointIPAMConfig
- Links []string
- Aliases []string
- // Operational data
- NetworkID string
- EndpointID string
- Gateway string
- IPAddress string
- IPPrefixLen int
- IPv6Gateway string
- GlobalIPv6Address string
- GlobalIPv6PrefixLen int
- MacAddress string
-}
-
-// NetworkingConfig represents the container's networking configuration for each of its interfaces
-// Carries the networking configs specified in the `docker run` and `docker network connect` commands
-type NetworkingConfig struct {
- EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
-}
diff --git a/vendor/github.com/docker/engine-api/types/plugin.go b/vendor/github.com/docker/engine-api/types/plugin.go
deleted file mode 100644
index 601c0ac..0000000
--- a/vendor/github.com/docker/engine-api/types/plugin.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// +build experimental
-
-package types
-
-import (
- "encoding/json"
- "fmt"
-)
-
-// PluginInstallOptions holds parameters to install a plugin.
-type PluginInstallOptions struct {
- Disabled bool
- AcceptAllPermissions bool
- RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
- PrivilegeFunc RequestPrivilegeFunc
- AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
-}
-
-// PluginConfig represents the values of settings potentially modifiable by a user
-type PluginConfig struct {
- Mounts []PluginMount
- Env []string
- Args []string
- Devices []PluginDevice
-}
-
-// Plugin represents a Docker plugin for the remote API
-type Plugin struct {
- ID string `json:"Id,omitempty"`
- Name string
- Tag string
- // Enabled is true when the plugin is running; it is false when the plugin is installed but not running.
- Enabled bool
- Config PluginConfig
- Manifest PluginManifest
-}
-
-// PluginsListResponse contains the response for the remote API
-type PluginsListResponse []*Plugin
-
-const (
- authzDriver = "AuthzDriver"
- graphDriver = "GraphDriver"
- ipamDriver = "IpamDriver"
- networkDriver = "NetworkDriver"
- volumeDriver = "VolumeDriver"
-)
-
-// PluginInterfaceType represents a type that a plugin implements.
-type PluginInterfaceType struct {
- Prefix string // This is always "docker"
- Capability string // Capability should be validated against the above list.
- Version string // Plugin API version. Depends on the capability
-}
-
-// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
-func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error {
- versionIndex := len(p)
- prefixIndex := 0
- if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' {
- return fmt.Errorf("%q is not a plugin interface type", p)
- }
- p = p[1 : len(p)-1]
-loop:
- for i, b := range p {
- switch b {
- case '.':
- prefixIndex = i
- case '/':
- versionIndex = i
- break loop
- }
- }
- t.Prefix = string(p[:prefixIndex])
- t.Capability = string(p[prefixIndex+1 : versionIndex])
- if versionIndex < len(p) {
- t.Version = string(p[versionIndex+1:])
- }
- return nil
-}
-
-// MarshalJSON implements json.Marshaler for PluginInterfaceType
-func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) {
- return json.Marshal(t.String())
-}
-
-// String implements fmt.Stringer for PluginInterfaceType
-func (t PluginInterfaceType) String() string {
- return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version)
-}
-
-// PluginInterface describes the interface between Docker and plugin
-type PluginInterface struct {
- Types []PluginInterfaceType
- Socket string
-}
-
-// PluginSetting is to be embedded in other structs, if they are supposed to be
-// modifiable by the user.
-type PluginSetting struct {
- Name string
- Description string
- Settable []string
-}
-
-// PluginNetwork represents the network configuration for a plugin
-type PluginNetwork struct {
- Type string
-}
-
-// PluginMount represents the mount configuration for a plugin
-type PluginMount struct {
- PluginSetting
- Source *string
- Destination string
- Type string
- Options []string
-}
-
-// PluginEnv represents an environment variable for a plugin
-type PluginEnv struct {
- PluginSetting
- Value *string
-}
-
-// PluginArgs represents the command line arguments for a plugin
-type PluginArgs struct {
- PluginSetting
- Value []string
-}
-
-// PluginDevice represents a device for a plugin
-type PluginDevice struct {
- PluginSetting
- Path *string
-}
-
-// PluginUser represents the user for the plugin's process
-type PluginUser struct {
- UID uint32 `json:"Uid,omitempty"`
- GID uint32 `json:"Gid,omitempty"`
-}
-
-// PluginManifest represents the manifest of a plugin
-type PluginManifest struct {
- ManifestVersion string
- Description string
- Documentation string
- Interface PluginInterface
- Entrypoint []string
- Workdir string
- User PluginUser `json:",omitempty"`
- Network PluginNetwork
- Capabilities []string
- Mounts []PluginMount
- Devices []PluginDevice
- Env []PluginEnv
- Args PluginArgs
-}
-
-// PluginPrivilege describes a permission the user has to accept
-// upon installing a plugin.
-type PluginPrivilege struct {
- Name string
- Description string
- Value []string
-}
-
-// PluginPrivileges is a list of PluginPrivilege
-type PluginPrivileges []PluginPrivilege
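
PluginInterfaceType's custom JSON round-trip can be exercised as below; note that the deleted file carries a +build experimental constraint, so this only compiles with that tag set, and the capability string is a made-up example:

// +build experimental

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types"
)

func main() {
	var t types.PluginInterfaceType
	// The wire form is a single string: "<prefix>.<capability>/<version>".
	if err := json.Unmarshal([]byte(`"docker.volumedriver/1.0"`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.Prefix, t.Capability, t.Version) // docker volumedriver 1.0
	fmt.Println(t.String())                        // docker.volumedriver/1.0
}
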
diff --git a/vendor/github.com/docker/engine-api/types/reference/image_reference.go b/vendor/github.com/docker/engine-api/types/reference/image_reference.go
deleted file mode 100644
index be9cf8e..0000000
--- a/vendor/github.com/docker/engine-api/types/reference/image_reference.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package reference
-
-import (
- distreference "github.com/docker/distribution/reference"
-)
-
-// Parse parses the given references and returns the repository and
-// tag (if present) from it. If there is an error during parsing, it will
-// return an error.
-func Parse(ref string) (string, string, error) {
- distributionRef, err := distreference.ParseNamed(ref)
- if err != nil {
- return "", "", err
- }
-
- tag := GetTagFromNamedRef(distributionRef)
- return distributionRef.Name(), tag, nil
-}
-
-// GetTagFromNamedRef returns a tag from the specified reference.
-// This function is necessary as long as the docker "server" api makes the distinction between repository
-// and tags.
-func GetTagFromNamedRef(ref distreference.Named) string {
- var tag string
- switch x := ref.(type) {
- case distreference.Digested:
- tag = x.Digest().String()
- case distreference.NamedTagged:
- tag = x.Tag()
- default:
- tag = "latest"
- }
- return tag
-}
diff --git a/vendor/github.com/docker/engine-api/types/registry/registry.go b/vendor/github.com/docker/engine-api/types/registry/registry.go
deleted file mode 100644
index 28fafab..0000000
--- a/vendor/github.com/docker/engine-api/types/registry/registry.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package registry
-
-import (
- "encoding/json"
- "net"
-)
-
-// ServiceConfig stores daemon registry services configuration.
-type ServiceConfig struct {
- InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
- IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
- Mirrors []string
-}
-
-// NetIPNet is the net.IPNet type, which can be marshalled and
-// unmarshalled to JSON
-type NetIPNet net.IPNet
-
-// String returns the CIDR notation of ipnet
-func (ipnet *NetIPNet) String() string {
- return (*net.IPNet)(ipnet).String()
-}
-
-// MarshalJSON returns the JSON representation of the IPNet
-func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
- return json.Marshal((*net.IPNet)(ipnet).String())
-}
-
-// UnmarshalJSON sets the IPNet from a byte array of JSON
-func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
- var ipnetStr string
- if err = json.Unmarshal(b, &ipnetStr); err == nil {
- var cidr *net.IPNet
- if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
- *ipnet = NetIPNet(*cidr)
- }
- }
- return
-}
-
-// IndexInfo contains information about a registry
-//
-// RepositoryInfo Examples:
-// {
-// "Index" : {
-// "Name" : "docker.io",
-// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
-// "Secure" : true,
-// "Official" : true,
-// },
-// "RemoteName" : "library/debian",
-// "LocalName" : "debian",
-// "CanonicalName" : "docker.io/debian"
-// "Official" : true,
-// }
-//
-// {
-// "Index" : {
-// "Name" : "127.0.0.1:5000",
-// "Mirrors" : [],
-// "Secure" : false,
-// "Official" : false,
-// },
-// "RemoteName" : "user/repo",
-// "LocalName" : "127.0.0.1:5000/user/repo",
-// "CanonicalName" : "127.0.0.1:5000/user/repo",
-// "Official" : false,
-// }
-type IndexInfo struct {
- // Name is the name of the registry, such as "docker.io"
- Name string
- // Mirrors is a list of mirrors, expressed as URIs
- Mirrors []string
- // Secure is set to false if the registry is part of the list of
- // insecure registries. Insecure registries accept HTTP and/or accept
- // HTTPS with certificates from unknown CAs.
- Secure bool
- // Official indicates whether this is an official registry
- Official bool
-}
-
-// SearchResult describes a search result returned from a registry
-type SearchResult struct {
- // StarCount indicates the number of stars this repository has
- StarCount int `json:"star_count"`
- // IsOfficial is true if the result is from an official repository.
- IsOfficial bool `json:"is_official"`
- // Name is the name of the repository
- Name string `json:"name"`
- // IsAutomated indicates whether the result is automated
- IsAutomated bool `json:"is_automated"`
- // Description is a textual description of the repository
- Description string `json:"description"`
-}
-
-// SearchResults lists a collection of search results returned from a registry
-type SearchResults struct {
- // Query contains the query string that generated the search results
- Query string `json:"query"`
- // NumResults indicates the number of results the query returned
- NumResults int `json:"num_results"`
- // Results is a slice containing the actual results for the search
- Results []SearchResult `json:"results"`
-}
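
NetIPNet serializes as a bare CIDR string rather than the struct form of net.IPNet; a small round-trip sketch (the CIDR is arbitrary):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types/registry"
)

func main() {
	var n registry.NetIPNet
	if err := json.Unmarshal([]byte(`"10.0.0.0/8"`), &n); err != nil {
		panic(err)
	}
	fmt.Println(n.String()) // 10.0.0.0/8

	out, err := json.Marshal(&n)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "10.0.0.0/8"
}
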
diff --git a/vendor/github.com/docker/engine-api/types/seccomp.go b/vendor/github.com/docker/engine-api/types/seccomp.go
deleted file mode 100644
index 4f02ef3..0000000
--- a/vendor/github.com/docker/engine-api/types/seccomp.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package types
-
-// Seccomp represents the config for a seccomp profile for syscall restriction.
-type Seccomp struct {
- DefaultAction Action `json:"defaultAction"`
- // Architectures is kept to maintain backward compatibility with the old
- // seccomp profile.
- Architectures []Arch `json:"architectures,omitempty"`
- ArchMap []Architecture `json:"archMap,omitempty"`
- Syscalls []*Syscall `json:"syscalls"`
-}
-
-// Architecture is used to represent a specific architecture
-// and its sub-architectures
-type Architecture struct {
- Arch Arch `json:"architecture"`
- SubArches []Arch `json:"subArchitectures"`
-}
-
-// Arch used for architectures
-type Arch string
-
-// Additional architectures permitted to be used for system calls
-// By default only the native architecture of the kernel is permitted
-const (
- ArchX86 Arch = "SCMP_ARCH_X86"
- ArchX86_64 Arch = "SCMP_ARCH_X86_64"
- ArchX32 Arch = "SCMP_ARCH_X32"
- ArchARM Arch = "SCMP_ARCH_ARM"
- ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
- ArchMIPS Arch = "SCMP_ARCH_MIPS"
- ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
- ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
- ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
- ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
- ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
- ArchPPC Arch = "SCMP_ARCH_PPC"
- ArchPPC64 Arch = "SCMP_ARCH_PPC64"
- ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
- ArchS390 Arch = "SCMP_ARCH_S390"
- ArchS390X Arch = "SCMP_ARCH_S390X"
-)
-
-// Action taken upon Seccomp rule match
-type Action string
-
-// Define actions for Seccomp rules
-const (
- ActKill Action = "SCMP_ACT_KILL"
- ActTrap Action = "SCMP_ACT_TRAP"
- ActErrno Action = "SCMP_ACT_ERRNO"
- ActTrace Action = "SCMP_ACT_TRACE"
- ActAllow Action = "SCMP_ACT_ALLOW"
-)
-
-// Operator used to match syscall arguments in Seccomp
-type Operator string
-
-// Define operators for syscall arguments in Seccomp
-const (
- OpNotEqual Operator = "SCMP_CMP_NE"
- OpLessThan Operator = "SCMP_CMP_LT"
- OpLessEqual Operator = "SCMP_CMP_LE"
- OpEqualTo Operator = "SCMP_CMP_EQ"
- OpGreaterEqual Operator = "SCMP_CMP_GE"
- OpGreaterThan Operator = "SCMP_CMP_GT"
- OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
-)
-
-// Arg used for matching specific syscall arguments in Seccomp
-type Arg struct {
- Index uint `json:"index"`
- Value uint64 `json:"value"`
- ValueTwo uint64 `json:"valueTwo"`
- Op Operator `json:"op"`
-}
-
-// Filter is used to conditionally apply Seccomp rules
-type Filter struct {
- Caps []string `json:"caps,omitempty"`
- Arches []string `json:"arches,omitempty"`
-}
-
-// Syscall is used to match a group of syscalls in Seccomp
-type Syscall struct {
- Name string `json:"name,omitempty"`
- Names []string `json:"names,omitempty"`
- Action Action `json:"action"`
- Args []*Arg `json:"args"`
- Comment string `json:"comment"`
- Includes Filter `json:"includes"`
- Excludes Filter `json:"excludes"`
-}
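
A minimal, illustrative seccomp profile built from the deleted types; the two-syscall whitelist is deliberately tiny and not a usable profile:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types"
)

func main() {
	// Deny everything by default, allow read and write on x86-64/x32.
	profile := types.Seccomp{
		DefaultAction: types.ActErrno,
		Architectures: []types.Arch{types.ArchX86_64, types.ArchX32},
		Syscalls: []*types.Syscall{
			{
				Names:  []string{"read", "write"},
				Action: types.ActAllow,
			},
		},
	}

	out, err := json.MarshalIndent(profile, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
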
diff --git a/vendor/github.com/docker/engine-api/types/stats.go b/vendor/github.com/docker/engine-api/types/stats.go
deleted file mode 100644
index b420ebe..0000000
--- a/vendor/github.com/docker/engine-api/types/stats.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Package types provides stable types for the stats responses returned to
-// consumers of the API stats endpoint.
-package types
-
-import "time"
-
-// ThrottlingData stores CPU throttling stats of one running container
-type ThrottlingData struct {
- // Number of periods with throttling active
- Periods uint64 `json:"periods"`
- // Number of periods when the container hits its throttling limit.
- ThrottledPeriods uint64 `json:"throttled_periods"`
- // Aggregate time the container was throttled for in nanoseconds.
- ThrottledTime uint64 `json:"throttled_time"`
-}
-
-// CPUUsage stores All CPU stats aggregated since container inception.
-type CPUUsage struct {
- // Total CPU time consumed.
- // Units: nanoseconds.
- TotalUsage uint64 `json:"total_usage"`
- // Total CPU time consumed per core.
- // Units: nanoseconds.
- PercpuUsage []uint64 `json:"percpu_usage"`
- // Time spent by tasks of the cgroup in kernel mode.
- // Units: nanoseconds.
- UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
- // Time spent by tasks of the cgroup in user mode.
- // Units: nanoseconds.
- UsageInUsermode uint64 `json:"usage_in_usermode"`
-}
-
-// CPUStats aggregates and wraps all CPU related info of container
-type CPUStats struct {
- CPUUsage CPUUsage `json:"cpu_usage"`
- SystemUsage uint64 `json:"system_cpu_usage"`
- ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
-}
-
-// MemoryStats aggregates All memory stats since container inception
-type MemoryStats struct {
- // current res_counter usage for memory
- Usage uint64 `json:"usage"`
- // maximum usage ever recorded.
- MaxUsage uint64 `json:"max_usage"`
- // TODO(vishh): Export these as stronger types.
- // all the stats exported via memory.stat.
- Stats map[string]uint64 `json:"stats"`
- // number of times memory usage hits limits.
- Failcnt uint64 `json:"failcnt"`
- Limit uint64 `json:"limit"`
-}
-
-// BlkioStatEntry is one small entity to store a piece of Blkio stats
-// TODO Windows: This can be factored out
-type BlkioStatEntry struct {
- Major uint64 `json:"major"`
- Minor uint64 `json:"minor"`
- Op string `json:"op"`
- Value uint64 `json:"value"`
-}
-
-// BlkioStats stores All IO service stats for data read and write
-// TODO Windows: This can be factored out
-type BlkioStats struct {
- // number of bytes transferred to and from the block device
- IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
- IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
- IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
- IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
- IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
- IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
- IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
- SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
-}
-
-// NetworkStats aggregates All network stats of one container
-// TODO Windows: This will require refactoring
-type NetworkStats struct {
- RxBytes uint64 `json:"rx_bytes"`
- RxPackets uint64 `json:"rx_packets"`
- RxErrors uint64 `json:"rx_errors"`
- RxDropped uint64 `json:"rx_dropped"`
- TxBytes uint64 `json:"tx_bytes"`
- TxPackets uint64 `json:"tx_packets"`
- TxErrors uint64 `json:"tx_errors"`
- TxDropped uint64 `json:"tx_dropped"`
-}
-
-// PidsStats contains the stats of a container's pids
-type PidsStats struct {
- // Current is the number of pids in the cgroup
- Current uint64 `json:"current,omitempty"`
- // Limit is the hard limit on the number of pids in the cgroup.
- // A "Limit" of 0 means that there is no limit.
- Limit uint64 `json:"limit,omitempty"`
-}
-
-// Stats is the top-level struct aggregating all types of stats of one container
-type Stats struct {
- Read time.Time `json:"read"`
- PreCPUStats CPUStats `json:"precpu_stats,omitempty"`
- CPUStats CPUStats `json:"cpu_stats,omitempty"`
- MemoryStats MemoryStats `json:"memory_stats,omitempty"`
- BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
- PidsStats PidsStats `json:"pids_stats,omitempty"`
-}
-
-// StatsJSON wraps Stats and adds per-network statistics (API version 1.21 and later)
-type StatsJSON struct {
- Stats
-
- // Networks request version >=1.21
- Networks map[string]NetworkStats `json:"networks,omitempty"`
-}
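
Consumers of these stats usually derive a CPU percentage from two consecutive samples; the helper below sketches the common delta-based arithmetic (the formula and the sample numbers are illustrative, not defined in the file above):

package main

import (
	"fmt"

	"github.com/docker/engine-api/types"
)

// cpuPercent derives a utilisation percentage from the previous/current
// CPU samples carried in a single StatsJSON message.
func cpuPercent(s *types.StatsJSON) float64 {
	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage) - float64(s.PreCPUStats.CPUUsage.TotalUsage)
	systemDelta := float64(s.CPUStats.SystemUsage) - float64(s.PreCPUStats.SystemUsage)
	if cpuDelta > 0 && systemDelta > 0 {
		return (cpuDelta / systemDelta) * float64(len(s.CPUStats.CPUUsage.PercpuUsage)) * 100.0
	}
	return 0.0
}

func main() {
	s := &types.StatsJSON{}
	s.PreCPUStats.CPUUsage.TotalUsage = 1000000
	s.PreCPUStats.SystemUsage = 10000000
	s.CPUStats.CPUUsage.TotalUsage = 2000000
	s.CPUStats.CPUUsage.PercpuUsage = []uint64{1000000, 1000000}
	s.CPUStats.SystemUsage = 20000000

	fmt.Printf("%.1f%%\n", cpuPercent(s)) // 20.0%
}
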
diff --git a/vendor/github.com/docker/engine-api/types/strslice/strslice.go b/vendor/github.com/docker/engine-api/types/strslice/strslice.go
deleted file mode 100644
index bad493f..0000000
--- a/vendor/github.com/docker/engine-api/types/strslice/strslice.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package strslice
-
-import "encoding/json"
-
-// StrSlice represents a string or an array of strings.
-// We need to override the json decoder to accept both options.
-type StrSlice []string
-
-// UnmarshalJSON decodes the byte slice whether it's a string or an array of
-// strings. This method is needed to implement json.Unmarshaler.
-func (e *StrSlice) UnmarshalJSON(b []byte) error {
- if len(b) == 0 {
- // With no input, we preserve the existing value by returning nil and
- // leaving the target alone. This allows defining default values for
- // the type.
- return nil
- }
-
- p := make([]string, 0, 1)
- if err := json.Unmarshal(b, &p); err != nil {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
- return err
- }
- p = append(p, s)
- }
-
- *e = p
- return nil
-}
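
StrSlice's dual decoding (a JSON string or a JSON array), documented in the file above, demonstrated on two hypothetical command encodings:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types/strslice"
)

func main() {
	var fromString, fromArray strslice.StrSlice

	// A bare string decodes into a one-element slice.
	if err := json.Unmarshal([]byte(`"/bin/sh -c 'echo hi'"`), &fromString); err != nil {
		panic(err)
	}
	// An array decodes element by element.
	if err := json.Unmarshal([]byte(`["/bin/sh", "-c", "echo hi"]`), &fromArray); err != nil {
		panic(err)
	}

	fmt.Println(fromString) // [/bin/sh -c 'echo hi']
	fmt.Println(fromArray)  // [/bin/sh -c echo hi]
}
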
diff --git a/vendor/github.com/docker/engine-api/types/swarm/common.go b/vendor/github.com/docker/engine-api/types/swarm/common.go
deleted file mode 100644
index b87f545..0000000
--- a/vendor/github.com/docker/engine-api/types/swarm/common.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package swarm
-
-import "time"
-
-// Version represent the internal object version.
-type Version struct {
- Index uint64 `json:",omitempty"`
-}
-
-// Meta is the base object embedded in most of the other ones.
-type Meta struct {
- Version Version `json:",omitempty"`
- CreatedAt time.Time `json:",omitempty"`
- UpdatedAt time.Time `json:",omitempty"`
-}
-
-// Annotations represents how to describe an object.
-type Annotations struct {
- Name string `json:",omitempty"`
- Labels map[string]string `json:",omitempty"`
-}
diff --git a/vendor/github.com/docker/engine-api/types/swarm/container.go b/vendor/github.com/docker/engine-api/types/swarm/container.go
deleted file mode 100644
index 11a3889..0000000
--- a/vendor/github.com/docker/engine-api/types/swarm/container.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package swarm
-
-import (
- "time"
-
- "github.com/docker/engine-api/types/mount"
-)
-
-// ContainerSpec represents the spec of a container.
-type ContainerSpec struct {
- Image string `json:",omitempty"`
- Labels map[string]string `json:",omitempty"`
- Command []string `json:",omitempty"`
- Args []string `json:",omitempty"`
- Env []string `json:",omitempty"`
- Dir string `json:",omitempty"`
- User string `json:",omitempty"`
- Groups []string `json:",omitempty"`
- TTY bool `json:",omitempty"`
- Mounts []mount.Mount `json:",omitempty"`
- StopGracePeriod *time.Duration `json:",omitempty"`
-}
diff --git a/vendor/github.com/docker/engine-api/types/swarm/network.go b/vendor/github.com/docker/engine-api/types/swarm/network.go
deleted file mode 100644
index 0af0ce1..0000000
--- a/vendor/github.com/docker/engine-api/types/swarm/network.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package swarm
-
-// Endpoint represents an endpoint.
-type Endpoint struct {
- Spec EndpointSpec `json:",omitempty"`
- Ports []PortConfig `json:",omitempty"`
- VirtualIPs []EndpointVirtualIP `json:",omitempty"`
-}
-
-// EndpointSpec represents the spec of an endpoint.
-type EndpointSpec struct {
- Mode ResolutionMode `json:",omitempty"`
- Ports []PortConfig `json:",omitempty"`
-}
-
-// ResolutionMode represents a resolution mode.
-type ResolutionMode string
-
-const (
- // ResolutionModeVIP VIP
- ResolutionModeVIP ResolutionMode = "vip"
- // ResolutionModeDNSRR DNSRR
- ResolutionModeDNSRR ResolutionMode = "dnsrr"
-)
-
-// PortConfig represents the config of a port.
-type PortConfig struct {
- Name string `json:",omitempty"`
- Protocol PortConfigProtocol `json:",omitempty"`
- TargetPort uint32 `json:",omitempty"`
- PublishedPort uint32 `json:",omitempty"`
-}
-
-// PortConfigProtocol represents the protocol of a port.
-type PortConfigProtocol string
-
-const (
- // TODO(stevvooe): These should be used generally, not just for PortConfig.
-
- // PortConfigProtocolTCP TCP
- PortConfigProtocolTCP PortConfigProtocol = "tcp"
- // PortConfigProtocolUDP UDP
- PortConfigProtocolUDP PortConfigProtocol = "udp"
-)
-
-// EndpointVirtualIP represents the virtual ip of a port.
-type EndpointVirtualIP struct {
- NetworkID string `json:",omitempty"`
- Addr string `json:",omitempty"`
-}
-
-// Network represents a network.
-type Network struct {
- ID string
- Meta
- Spec NetworkSpec `json:",omitempty"`
- DriverState Driver `json:",omitempty"`
- IPAMOptions *IPAMOptions `json:",omitempty"`
-}
-
-// NetworkSpec represents the spec of a network.
-type NetworkSpec struct {
- Annotations
- DriverConfiguration *Driver `json:",omitempty"`
- IPv6Enabled bool `json:",omitempty"`
- Internal bool `json:",omitempty"`
- Attachable bool `json:",omitempty"`
- IPAMOptions *IPAMOptions `json:",omitempty"`
-}
-
-// NetworkAttachmentConfig represents the configuration of a network attachment.
-type NetworkAttachmentConfig struct {
- Target string `json:",omitempty"`
- Aliases []string `json:",omitempty"`
-}
-
-// NetworkAttachment represents a network attachment.
-type NetworkAttachment struct {
- Network Network `json:",omitempty"`
- Addresses []string `json:",omitempty"`
-}
-
-// IPAMOptions represents ipam options.
-type IPAMOptions struct {
- Driver Driver `json:",omitempty"`
- Configs []IPAMConfig `json:",omitempty"`
-}
-
-// IPAMConfig represents ipam configuration.
-type IPAMConfig struct {
- Subnet string `json:",omitempty"`
- Range string `json:",omitempty"`
- Gateway string `json:",omitempty"`
-}
-
-// Driver represents a network driver.
-type Driver struct {
- Name string `json:",omitempty"`
- Options map[string]string `json:",omitempty"`
-}
diff --git a/vendor/github.com/docker/engine-api/types/swarm/node.go b/vendor/github.com/docker/engine-api/types/swarm/node.go
deleted file mode 100644
index 9987662..0000000
--- a/vendor/github.com/docker/engine-api/types/swarm/node.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package swarm
-
-// Node represents a node.
-type Node struct {
- ID string
- Meta
-
- Spec NodeSpec `json:",omitempty"`
- Description NodeDescription `json:",omitempty"`
- Status NodeStatus `json:",omitempty"`
- ManagerStatus *ManagerStatus `json:",omitempty"`
-}
-
-// NodeSpec represents the spec of a node.
-type NodeSpec struct {
- Annotations
- Role NodeRole `json:",omitempty"`
- Availability NodeAvailability `json:",omitempty"`
-}
-
-// NodeRole represents the role of a node.
-type NodeRole string
-
-const (
- // NodeRoleWorker WORKER
- NodeRoleWorker NodeRole = "worker"
- // NodeRoleManager MANAGER
- NodeRoleManager NodeRole = "manager"
-)
-
-// NodeAvailability represents the availability of a node.
-type NodeAvailability string
-
-const (
- // NodeAvailabilityActive ACTIVE
- NodeAvailabilityActive NodeAvailability = "active"
- // NodeAvailabilityPause PAUSE
- NodeAvailabilityPause NodeAvailability = "pause"
- // NodeAvailabilityDrain DRAIN
- NodeAvailabilityDrain NodeAvailability = "drain"
-)
-
-// NodeDescription represents the description of a node.
-type NodeDescription struct {
- Hostname string `json:",omitempty"`
- Platform Platform `json:",omitempty"`
- Resources Resources `json:",omitempty"`
- Engine EngineDescription `json:",omitempty"`
-}
-
-// Platform represents the platform (Arch/OS).
-type Platform struct {
- Architecture string `json:",omitempty"`
- OS string `json:",omitempty"`
-}
-
-// EngineDescription represents the description of an engine.
-type EngineDescription struct {
- EngineVersion string `json:",omitempty"`
- Labels map[string]string `json:",omitempty"`
- Plugins []PluginDescription `json:",omitempty"`
-}
-
-// PluginDescription represents the description of an engine plugin.
-type PluginDescription struct {
- Type string `json:",omitempty"`
- Name string `json:",omitempty"`
-}
-
-// NodeStatus represents the status of a node.
-type NodeStatus struct {
- State NodeState `json:",omitempty"`
- Message string `json:",omitempty"`
-}
-
-// Reachability represents the reachability of a node.
-type Reachability string
-
-const (
- // ReachabilityUnknown UNKNOWN
- ReachabilityUnknown Reachability = "unknown"
- // ReachabilityUnreachable UNREACHABLE
- ReachabilityUnreachable Reachability = "unreachable"
- // ReachabilityReachable REACHABLE
- ReachabilityReachable Reachability = "reachable"
-)
-
-// ManagerStatus represents the status of a manager.
-type ManagerStatus struct {
- Leader bool `json:",omitempty"`
- Reachability Reachability `json:",omitempty"`
- Addr string `json:",omitempty"`
-}
-
-// NodeState represents the state of a node.
-type NodeState string
-
-const (
- // NodeStateUnknown UNKNOWN
- NodeStateUnknown NodeState = "unknown"
- // NodeStateDown DOWN
- NodeStateDown NodeState = "down"
- // NodeStateReady READY
- NodeStateReady NodeState = "ready"
- // NodeStateDisconnected DISCONNECTED
- NodeStateDisconnected NodeState = "disconnected"
-)
diff --git a/vendor/github.com/docker/engine-api/types/swarm/service.go b/vendor/github.com/docker/engine-api/types/swarm/service.go
deleted file mode 100644
index d679c45..0000000
--- a/vendor/github.com/docker/engine-api/types/swarm/service.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package swarm
-
-import "time"
-
-// Service represents a service.
-type Service struct {
- ID string
- Meta
- Spec ServiceSpec `json:",omitempty"`
- Endpoint Endpoint `json:",omitempty"`
- UpdateStatus UpdateStatus `json:",omitempty"`
-}
-
-// ServiceSpec represents the spec of a service.
-type ServiceSpec struct {
- Annotations
-
- // TaskTemplate defines how the service should construct new tasks when
- // orchestrating this service.
- TaskTemplate TaskSpec `json:",omitempty"`
- Mode ServiceMode `json:",omitempty"`
- UpdateConfig *UpdateConfig `json:",omitempty"`
-
- // Networks field in ServiceSpec is being deprecated. Users of
- // engine-api should start using the same field in
- // TaskSpec. This field will be removed in future releases.
- Networks []NetworkAttachmentConfig `json:",omitempty"`
- EndpointSpec *EndpointSpec `json:",omitempty"`
-}
-
-// ServiceMode represents the mode of a service.
-type ServiceMode struct {
- Replicated *ReplicatedService `json:",omitempty"`
- Global *GlobalService `json:",omitempty"`
-}
-
-// UpdateState is the state of a service update.
-type UpdateState string
-
-const (
- // UpdateStateUpdating is the updating state.
- UpdateStateUpdating UpdateState = "updating"
- // UpdateStatePaused is the paused state.
- UpdateStatePaused UpdateState = "paused"
- // UpdateStateCompleted is the completed state.
- UpdateStateCompleted UpdateState = "completed"
-)
-
-// UpdateStatus reports the status of a service update.
-type UpdateStatus struct {
- State UpdateState `json:",omitempty"`
- StartedAt time.Time `json:",omitempty"`
- CompletedAt time.Time `json:",omitempty"`
- Message string `json:",omitempty"`
-}
-
-// ReplicatedService is a kind of ServiceMode.
-type ReplicatedService struct {
- Replicas *uint64 `json:",omitempty"`
-}
-
-// GlobalService is a kind of ServiceMode.
-type GlobalService struct{}
-
-const (
- // UpdateFailureActionPause PAUSE
- UpdateFailureActionPause = "pause"
- // UpdateFailureActionContinue CONTINUE
- UpdateFailureActionContinue = "continue"
-)
-
-// UpdateConfig represents the update configuration.
-type UpdateConfig struct {
- Parallelism uint64 `json:",omitempty"`
- Delay time.Duration `json:",omitempty"`
- FailureAction string `json:",omitempty"`
-}
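
Before these service types disappear from the vendor tree, here is a minimal sketch of how they compose into a replicated service spec; the ContainerSpec.Image field and all values are illustrative assumptions, not taken from this diff:

package main

import (
	"fmt"

	"github.com/docker/engine-api/types/swarm"
)

func main() {
	replicas := uint64(3)

	spec := swarm.ServiceSpec{
		// TaskTemplate describes the tasks the orchestrator creates for
		// this service (the Image field is assumed from the swarm package).
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: swarm.ContainerSpec{Image: "nginx:alpine"},
		},
		// Exactly one of Replicated or Global is normally set.
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{Replicas: &replicas},
		},
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:   1,
			FailureAction: swarm.UpdateFailureActionPause,
		},
	}

	fmt.Printf("%+v\n", spec)
}
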
diff --git a/vendor/github.com/docker/engine-api/types/swarm/swarm.go b/vendor/github.com/docker/engine-api/types/swarm/swarm.go
deleted file mode 100644
index 23b2e6a..0000000
--- a/vendor/github.com/docker/engine-api/types/swarm/swarm.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package swarm
-
-import "time"
-
-// ClusterInfo represents info about the cluster for outputting in "info"
-// it contains the same information as "Swarm", but without the JoinTokens
-type ClusterInfo struct {
- ID string
- Meta
- Spec Spec
-}
-
-// Swarm represents a swarm.
-type Swarm struct {
- ClusterInfo
- JoinTokens JoinTokens
-}
-
-// JoinTokens contains the tokens workers and managers need to join the swarm.
-type JoinTokens struct {
- Worker string
- Manager string
-}
-
-// Spec represents the spec of a swarm.
-type Spec struct {
- Annotations
-
- Orchestration OrchestrationConfig `json:",omitempty"`
- Raft RaftConfig `json:",omitempty"`
- Dispatcher DispatcherConfig `json:",omitempty"`
- CAConfig CAConfig `json:",omitempty"`
- TaskDefaults TaskDefaults `json:",omitempty"`
-}
-
-// OrchestrationConfig represents orchestration configuration.
-type OrchestrationConfig struct {
- TaskHistoryRetentionLimit int64 `json:",omitempty"`
-}
-
-// TaskDefaults parameterizes cluster-level task creation with default values.
-type TaskDefaults struct {
- // LogDriver selects the log driver to use for tasks created in the
- // orchestrator if unspecified by a service.
- //
- // Updating this value will only have an effect on new tasks. Old tasks
- // will continue to use their previously configured log driver until
- // recreated.
- LogDriver *Driver `json:",omitempty"`
-}
-
-// RaftConfig represents raft configuration.
-type RaftConfig struct {
- SnapshotInterval uint64 `json:",omitempty"`
- KeepOldSnapshots uint64 `json:",omitempty"`
- LogEntriesForSlowFollowers uint64 `json:",omitempty"`
-
- // ElectionTick is the number of ticks that a follower will wait for a message
- // from the leader before becoming a candidate and starting an election.
- // ElectionTick must be greater than HeartbeatTick.
- //
- // A tick currently defaults to one second, so these translate directly to
- // seconds currently, but this is NOT guaranteed.
- ElectionTick int
-
- // HeartbeatTick is the number of ticks between heartbeats. Every
- // HeartbeatTick ticks, the leader will send a heartbeat to the
- // followers.
- //
- // A tick currently defaults to one second, so these translate directly to
- // seconds currently, but this is NOT guaranteed.
- HeartbeatTick int
-}
-
-// DispatcherConfig represents dispatcher configuration.
-type DispatcherConfig struct {
- HeartbeatPeriod time.Duration `json:",omitempty"`
-}
-
-// CAConfig represents CA configuration.
-type CAConfig struct {
- NodeCertExpiry time.Duration `json:",omitempty"`
- ExternalCAs []*ExternalCA `json:",omitempty"`
-}
-
-// ExternalCAProtocol represents type of external CA.
-type ExternalCAProtocol string
-
-// ExternalCAProtocolCFSSL CFSSL
-const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
-
-// ExternalCA defines external CA to be used by the cluster.
-type ExternalCA struct {
- Protocol ExternalCAProtocol
- URL string
- Options map[string]string `json:",omitempty"`
-}
-
-// InitRequest is the request used to init a swarm.
-type InitRequest struct {
- ListenAddr string
- AdvertiseAddr string
- ForceNewCluster bool
- Spec Spec
-}
-
-// JoinRequest is the request used to join a swarm.
-type JoinRequest struct {
- ListenAddr string
- AdvertiseAddr string
- RemoteAddrs []string
- JoinToken string // secret used to accept the node into the swarm
-}
-
-// LocalNodeState represents the state of the local node.
-type LocalNodeState string
-
-const (
- // LocalNodeStateInactive INACTIVE
- LocalNodeStateInactive LocalNodeState = "inactive"
- // LocalNodeStatePending PENDING
- LocalNodeStatePending LocalNodeState = "pending"
- // LocalNodeStateActive ACTIVE
- LocalNodeStateActive LocalNodeState = "active"
- // LocalNodeStateError ERROR
- LocalNodeStateError LocalNodeState = "error"
-)
-
-// Info represents generic information about swarm.
-type Info struct {
- NodeID string
- NodeAddr string
-
- LocalNodeState LocalNodeState
- ControlAvailable bool
- Error string
-
- RemoteManagers []Peer
- Nodes int
- Managers int
-
- Cluster ClusterInfo
-}
-
-// Peer represents a peer.
-type Peer struct {
- NodeID string
- Addr string
-}
-
-// UpdateFlags contains flags for SwarmUpdate.
-type UpdateFlags struct {
- RotateWorkerToken bool
- RotateManagerToken bool
-}
diff --git a/vendor/github.com/docker/engine-api/types/swarm/task.go b/vendor/github.com/docker/engine-api/types/swarm/task.go
deleted file mode 100644
index 591e07b..0000000
--- a/vendor/github.com/docker/engine-api/types/swarm/task.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package swarm
-
-import "time"
-
-// TaskState represents the state of a task.
-type TaskState string
-
-const (
- // TaskStateNew NEW
- TaskStateNew TaskState = "new"
- // TaskStateAllocated ALLOCATED
- TaskStateAllocated TaskState = "allocated"
- // TaskStatePending PENDING
- TaskStatePending TaskState = "pending"
- // TaskStateAssigned ASSIGNED
- TaskStateAssigned TaskState = "assigned"
- // TaskStateAccepted ACCEPTED
- TaskStateAccepted TaskState = "accepted"
- // TaskStatePreparing PREPARING
- TaskStatePreparing TaskState = "preparing"
- // TaskStateReady READY
- TaskStateReady TaskState = "ready"
- // TaskStateStarting STARTING
- TaskStateStarting TaskState = "starting"
- // TaskStateRunning RUNNING
- TaskStateRunning TaskState = "running"
- // TaskStateComplete COMPLETE
- TaskStateComplete TaskState = "complete"
- // TaskStateShutdown SHUTDOWN
- TaskStateShutdown TaskState = "shutdown"
- // TaskStateFailed FAILED
- TaskStateFailed TaskState = "failed"
- // TaskStateRejected REJECTED
- TaskStateRejected TaskState = "rejected"
-)
-
-// Task represents a task.
-type Task struct {
- ID string
- Meta
- Annotations
-
- Spec TaskSpec `json:",omitempty"`
- ServiceID string `json:",omitempty"`
- Slot int `json:",omitempty"`
- NodeID string `json:",omitempty"`
- Status TaskStatus `json:",omitempty"`
- DesiredState TaskState `json:",omitempty"`
- NetworksAttachments []NetworkAttachment `json:",omitempty"`
-}
-
-// TaskSpec represents the spec of a task.
-type TaskSpec struct {
- ContainerSpec ContainerSpec `json:",omitempty"`
- Resources *ResourceRequirements `json:",omitempty"`
- RestartPolicy *RestartPolicy `json:",omitempty"`
- Placement *Placement `json:",omitempty"`
- Networks []NetworkAttachmentConfig `json:",omitempty"`
-
- // LogDriver specifies the LogDriver to use for tasks created from this
- // spec. If not present, the cluster-level default from swarm.Spec will be
- // used, finally falling back to the engine default if neither is specified.
- LogDriver *Driver `json:",omitempty"`
-}
-
-// Resources represents resources (CPU/Memory).
-type Resources struct {
- NanoCPUs int64 `json:",omitempty"`
- MemoryBytes int64 `json:",omitempty"`
-}
-
-// ResourceRequirements represents resources requirements.
-type ResourceRequirements struct {
- Limits *Resources `json:",omitempty"`
- Reservations *Resources `json:",omitempty"`
-}
-
-// Placement represents orchestration parameters.
-type Placement struct {
- Constraints []string `json:",omitempty"`
-}
-
-// RestartPolicy represents the restart policy.
-type RestartPolicy struct {
- Condition RestartPolicyCondition `json:",omitempty"`
- Delay *time.Duration `json:",omitempty"`
- MaxAttempts *uint64 `json:",omitempty"`
- Window *time.Duration `json:",omitempty"`
-}
-
-// RestartPolicyCondition represents when to restart.
-type RestartPolicyCondition string
-
-const (
- // RestartPolicyConditionNone NONE
- RestartPolicyConditionNone RestartPolicyCondition = "none"
- // RestartPolicyConditionOnFailure ON_FAILURE
- RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
- // RestartPolicyConditionAny ANY
- RestartPolicyConditionAny RestartPolicyCondition = "any"
-)
-
-// TaskStatus represents the status of a task.
-type TaskStatus struct {
- Timestamp time.Time `json:",omitempty"`
- State TaskState `json:",omitempty"`
- Message string `json:",omitempty"`
- Err string `json:",omitempty"`
- ContainerStatus ContainerStatus `json:",omitempty"`
-}
-
-// ContainerStatus represents the status of a container.
-type ContainerStatus struct {
- ContainerID string `json:",omitempty"`
- PID int `json:",omitempty"`
- ExitCode int `json:",omitempty"`
-}
diff --git a/vendor/github.com/docker/engine-api/types/time/duration_convert.go b/vendor/github.com/docker/engine-api/types/time/duration_convert.go
deleted file mode 100644
index 63e1eec..0000000
--- a/vendor/github.com/docker/engine-api/types/time/duration_convert.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package time
-
-import (
- "strconv"
- "time"
-)
-
-// DurationToSecondsString converts the specified duration to the number of
-// seconds it represents, formatted as a string.
-func DurationToSecondsString(duration time.Duration) string {
- return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64)
-}
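
A one-line usage sketch of the helper being removed above (the duration is arbitrary):

package main

import (
	"fmt"
	"time"

	dockertime "github.com/docker/engine-api/types/time"
)

func main() {
	// Renders the duration as a whole number of seconds, e.g. for query strings.
	fmt.Println(dockertime.DurationToSecondsString(90 * time.Second)) // "90"
}
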
diff --git a/vendor/github.com/docker/engine-api/types/time/timestamp.go b/vendor/github.com/docker/engine-api/types/time/timestamp.go
deleted file mode 100644
index d3695ba..0000000
--- a/vendor/github.com/docker/engine-api/types/time/timestamp.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package time
-
-import (
- "fmt"
- "math"
- "strconv"
- "strings"
- "time"
-)
-
-// These are additional predefined layouts for use in Time.Format and Time.Parse
-// with --since and --until parameters for `docker logs` and `docker events`
-const (
- rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
- rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
- dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
- dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
-)
-
-// GetTimestamp tries to parse the given string as a Go duration,
-// then as an RFC3339 time and finally as a Unix timestamp. If
-// any of these succeed, it returns a Unix timestamp as a string;
-// otherwise it returns the given value unchanged.
-// In case of duration input, the returned timestamp is computed
-// as the given reference time minus the amount of the duration.
-func GetTimestamp(value string, reference time.Time) (string, error) {
- if d, err := time.ParseDuration(value); value != "0" && err == nil {
- return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
- }
-
- var format string
- var parseInLocation bool
-
- // if the string has a Z, a +, or three dashes, use time.Parse; otherwise use time.ParseInLocation
- parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
-
- if strings.Contains(value, ".") {
- if parseInLocation {
- format = rFC3339NanoLocal
- } else {
- format = time.RFC3339Nano
- }
- } else if strings.Contains(value, "T") {
- // we want the number of colons in the T portion of the timestamp
- tcolons := strings.Count(value, ":")
- // if parseInLocation is off and we have a +/- zone offset (not Z), then
- // there will be an extra colon in the input for the tz offset; subtract that
- // colon from the tcolons count
- if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
- tcolons--
- }
- if parseInLocation {
- switch tcolons {
- case 0:
- format = "2006-01-02T15"
- case 1:
- format = "2006-01-02T15:04"
- default:
- format = rFC3339Local
- }
- } else {
- switch tcolons {
- case 0:
- format = "2006-01-02T15Z07:00"
- case 1:
- format = "2006-01-02T15:04Z07:00"
- default:
- format = time.RFC3339
- }
- }
- } else if parseInLocation {
- format = dateLocal
- } else {
- format = dateWithZone
- }
-
- var t time.Time
- var err error
-
- if parseInLocation {
- t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
- } else {
- t, err = time.Parse(format, value)
- }
-
- if err != nil {
- // if there is a `-` then it's an RFC3339-like timestamp; otherwise assume a Unix timestamp
- if strings.Contains(value, "-") {
- return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
- }
- return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
- }
-
- return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
-}
-
-// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the
-// format "%d.%09d" (time.Unix(), int64(time.Nanosecond())).
-// If the incoming nanosecond portion is longer or shorter than 9 digits, it is
-// converted to nanoseconds. The expectation is that the seconds and
-// nanoseconds will be used to create a time variable. For example:
-// seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
-// if err == nil { since := time.Unix(seconds, nanoseconds) }
-// It returns def (the default seconds) as the seconds if value == "".
-func ParseTimestamps(value string, def int64) (int64, int64, error) {
- if value == "" {
- return def, 0, nil
- }
- sa := strings.SplitN(value, ".", 2)
- s, err := strconv.ParseInt(sa[0], 10, 64)
- if err != nil {
- return s, 0, err
- }
- if len(sa) != 2 {
- return s, 0, nil
- }
- n, err := strconv.ParseInt(sa[1], 10, 64)
- if err != nil {
- return s, n, err
- }
- // should already be in nanoseconds but just in case convert n to nanoseconds
- n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
- return s, n, nil
-}
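
A minimal sketch of how the two removed timestamp helpers fit together, with illustrative inputs:

package main

import (
	"fmt"
	"time"

	dockertime "github.com/docker/engine-api/types/time"
)

func main() {
	now := time.Now()

	// A duration is interpreted relative to the reference time.
	ts, _ := dockertime.GetTimestamp("10m", now)
	fmt.Println(ts) // Unix seconds for now minus ten minutes

	// An RFC3339 value comes back in the "seconds.nanoseconds" form.
	ts, _ = dockertime.GetTimestamp("2006-01-02T15:04:05Z", now)
	fmt.Println(ts)

	// ParseTimestamps splits that form back into integer parts.
	if secs, nanos, err := dockertime.ParseTimestamps(ts, 0); err == nil {
		fmt.Println(time.Unix(secs, nanos).UTC())
	}
}
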
diff --git a/vendor/github.com/docker/engine-api/types/types.go b/vendor/github.com/docker/engine-api/types/types.go
deleted file mode 100644
index 76d7f69..0000000
--- a/vendor/github.com/docker/engine-api/types/types.go
+++ /dev/null
@@ -1,519 +0,0 @@
-package types
-
-import (
- "os"
- "time"
-
- "github.com/docker/engine-api/types/container"
- "github.com/docker/engine-api/types/mount"
- "github.com/docker/engine-api/types/network"
- "github.com/docker/engine-api/types/registry"
- "github.com/docker/engine-api/types/swarm"
- "github.com/docker/go-connections/nat"
-)
-
-// ContainerCreateResponse contains the information returned to a client on the
-// creation of a new container.
-type ContainerCreateResponse struct {
- // ID is the ID of the created container.
- ID string `json:"Id"`
-
- // Warnings are any warnings encountered during the creation of the container.
- Warnings []string `json:"Warnings"`
-}
-
-// ContainerExecCreateResponse contains response of Remote API:
-// POST "/containers/{name:.*}/exec"
-type ContainerExecCreateResponse struct {
- // ID is the exec ID.
- ID string `json:"Id"`
-}
-
-// ContainerUpdateResponse contains response of Remote API:
-// POST "/containers/{name:.*}/update"
-type ContainerUpdateResponse struct {
- // Warnings are any warnings encountered during the updating of the container.
- Warnings []string `json:"Warnings"`
-}
-
-// AuthResponse contains response of Remote API:
-// POST "/auth"
-type AuthResponse struct {
- // Status is the authentication status
- Status string `json:"Status"`
-
- // IdentityToken is an opaque token used for authenticating
- // a user after a successful login.
- IdentityToken string `json:"IdentityToken,omitempty"`
-}
-
-// ContainerWaitResponse contains response of Remote API:
-// POST "/containers/"+containerID+"/wait"
-type ContainerWaitResponse struct {
- // StatusCode is the status code of the wait job
- StatusCode int `json:"StatusCode"`
-}
-
-// ContainerCommitResponse contains response of Remote API:
-// POST "/commit?container="+containerID
-type ContainerCommitResponse struct {
- ID string `json:"Id"`
-}
-
-// ContainerChange contains response of Remote API:
-// GET "/containers/{name:.*}/changes"
-type ContainerChange struct {
- Kind int
- Path string
-}
-
-// ImageHistory contains response of Remote API:
-// GET "/images/{name:.*}/history"
-type ImageHistory struct {
- ID string `json:"Id"`
- Created int64
- CreatedBy string
- Tags []string
- Size int64
- Comment string
-}
-
-// ImageDelete contains response of Remote API:
-// DELETE "/images/{name:.*}"
-type ImageDelete struct {
- Untagged string `json:",omitempty"`
- Deleted string `json:",omitempty"`
-}
-
-// Image contains response of Remote API:
-// GET "/images/json"
-type Image struct {
- ID string `json:"Id"`
- ParentID string `json:"ParentId"`
- RepoTags []string
- RepoDigests []string
- Created int64
- Size int64
- VirtualSize int64
- Labels map[string]string
-}
-
-// GraphDriverData returns Image's graph driver config info
-// when calling inspect command
-type GraphDriverData struct {
- Name string
- Data map[string]string
-}
-
-// RootFS returns Image's RootFS description including the layer IDs.
-type RootFS struct {
- Type string
- Layers []string `json:",omitempty"`
- BaseLayer string `json:",omitempty"`
-}
-
-// ImageInspect contains response of Remote API:
-// GET "/images/{name:.*}/json"
-type ImageInspect struct {
- ID string `json:"Id"`
- RepoTags []string
- RepoDigests []string
- Parent string
- Comment string
- Created string
- Container string
- ContainerConfig *container.Config
- DockerVersion string
- Author string
- Config *container.Config
- Architecture string
- Os string
- Size int64
- VirtualSize int64
- GraphDriver GraphDriverData
- RootFS RootFS
-}
-
-// Port stores open ports info of container
-// e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"}
-type Port struct {
- IP string `json:",omitempty"`
- PrivatePort int
- PublicPort int `json:",omitempty"`
- Type string
-}
-
-// Container contains response of Remote API:
-// GET "/containers/json"
-type Container struct {
- ID string `json:"Id"`
- Names []string
- Image string
- ImageID string
- Command string
- Created int64
- Ports []Port
- SizeRw int64 `json:",omitempty"`
- SizeRootFs int64 `json:",omitempty"`
- Labels map[string]string
- State string
- Status string
- HostConfig struct {
- NetworkMode string `json:",omitempty"`
- }
- NetworkSettings *SummaryNetworkSettings
- Mounts []MountPoint
-}
-
-// CopyConfig contains request body of Remote API:
-// POST "/containers/"+containerID+"/copy"
-type CopyConfig struct {
- Resource string
-}
-
-// ContainerPathStat is used to encode the header from
-// GET "/containers/{name:.*}/archive"
-// "Name" is the file or directory name.
-type ContainerPathStat struct {
- Name string `json:"name"`
- Size int64 `json:"size"`
- Mode os.FileMode `json:"mode"`
- Mtime time.Time `json:"mtime"`
- LinkTarget string `json:"linkTarget"`
-}
-
-// ContainerProcessList contains response of Remote API:
-// GET "/containers/{name:.*}/top"
-type ContainerProcessList struct {
- Processes [][]string
- Titles []string
-}
-
-// Version contains response of Remote API:
-// GET "/version"
-type Version struct {
- Version string
- APIVersion string `json:"ApiVersion"`
- GitCommit string
- GoVersion string
- Os string
- Arch string
- KernelVersion string `json:",omitempty"`
- Experimental bool `json:",omitempty"`
- BuildTime string `json:",omitempty"`
-}
-
-// Info contains response of Remote API:
-// GET "/info"
-type Info struct {
- ID string
- Containers int
- ContainersRunning int
- ContainersPaused int
- ContainersStopped int
- Images int
- Driver string
- DriverStatus [][2]string
- SystemStatus [][2]string
- Plugins PluginsInfo
- MemoryLimit bool
- SwapLimit bool
- KernelMemory bool
- CPUCfsPeriod bool `json:"CpuCfsPeriod"`
- CPUCfsQuota bool `json:"CpuCfsQuota"`
- CPUShares bool
- CPUSet bool
- IPv4Forwarding bool
- BridgeNfIptables bool
- BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
- Debug bool
- NFd int
- OomKillDisable bool
- NGoroutines int
- SystemTime string
- LoggingDriver string
- CgroupDriver string
- NEventsListener int
- KernelVersion string
- OperatingSystem string
- OSType string
- Architecture string
- IndexServerAddress string
- RegistryConfig *registry.ServiceConfig
- NCPU int
- MemTotal int64
- DockerRootDir string
- HTTPProxy string `json:"HttpProxy"`
- HTTPSProxy string `json:"HttpsProxy"`
- NoProxy string
- Name string
- Labels []string
- ExperimentalBuild bool
- ServerVersion string
- ClusterStore string
- ClusterAdvertise string
- SecurityOptions []string
- Runtimes map[string]Runtime
- DefaultRuntime string
- Swarm swarm.Info
- // LiveRestoreEnabled determines whether containers should be kept
- // running when the daemon is shut down or upon daemon start if
- // running containers are detected
- LiveRestoreEnabled bool
-}
-
-// PluginsInfo is a temp struct holding the names of plugins
-// registered with the docker daemon. It is used by the Info struct
-type PluginsInfo struct {
- // List of Volume plugins registered
- Volume []string
- // List of Network plugins registered
- Network []string
- // List of Authorization plugins registered
- Authorization []string
-}
-
-// ExecStartCheck is a temp struct used by execStart
-// Config fields are part of ExecConfig in the runconfig package
-type ExecStartCheck struct {
- // ExecStart will first check if it's detached
- Detach bool
- // Check if there's a tty
- Tty bool
-}
-
-// HealthcheckResult stores information about a single run of a healthcheck probe
-type HealthcheckResult struct {
- Start time.Time // Start is the time this check started
- End time.Time // End is the time this check ended
- ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
- Output string // Output from last check
-}
-
-// Health states
-const (
- Starting = "starting" // Starting indicates that the container is not yet ready
- Healthy = "healthy" // Healthy indicates that the container is running correctly
- Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
-)
-
-// Health stores information about the container's healthcheck results
-type Health struct {
- Status string // Status is one of Starting, Healthy or Unhealthy
- FailingStreak int // FailingStreak is the number of consecutive failures
- Log []*HealthcheckResult // Log contains the last few results (oldest first)
-}
-
-// ContainerState stores container's running state
-// it's part of ContainerJSONBase and is returned by the "inspect" command
-type ContainerState struct {
- Status string
- Running bool
- Paused bool
- Restarting bool
- OOMKilled bool
- Dead bool
- Pid int
- ExitCode int
- Error string
- StartedAt string
- FinishedAt string
- Health *Health `json:",omitempty"`
-}
-
-// ContainerNode stores information about the node that a container
-// is running on. It's only available in Docker Swarm
-type ContainerNode struct {
- ID string
- IPAddress string `json:"IP"`
- Addr string
- Name string
- Cpus int
- Memory int64
- Labels map[string]string
-}
-
-// ContainerJSONBase contains response of Remote API:
-// GET "/containers/{name:.*}/json"
-type ContainerJSONBase struct {
- ID string `json:"Id"`
- Created string
- Path string
- Args []string
- State *ContainerState
- Image string
- ResolvConfPath string
- HostnamePath string
- HostsPath string
- LogPath string
- Node *ContainerNode `json:",omitempty"`
- Name string
- RestartCount int
- Driver string
- MountLabel string
- ProcessLabel string
- AppArmorProfile string
- ExecIDs []string
- HostConfig *container.HostConfig
- GraphDriver GraphDriverData
- SizeRw *int64 `json:",omitempty"`
- SizeRootFs *int64 `json:",omitempty"`
-}
-
-// ContainerJSON is the newer struct used along with MountPoint
-type ContainerJSON struct {
- *ContainerJSONBase
- Mounts []MountPoint
- Config *container.Config
- NetworkSettings *NetworkSettings
-}
-
-// NetworkSettings exposes the network settings in the api
-type NetworkSettings struct {
- NetworkSettingsBase
- DefaultNetworkSettings
- Networks map[string]*network.EndpointSettings
-}
-
-// SummaryNetworkSettings provides a summary of container's networks
-// in /containers/json
-type SummaryNetworkSettings struct {
- Networks map[string]*network.EndpointSettings
-}
-
-// NetworkSettingsBase holds basic information about networks
-type NetworkSettingsBase struct {
- Bridge string // Bridge is the Bridge name the network uses (e.g. `docker0`)
- SandboxID string // SandboxID uniquely represents a container's network stack
- HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
- LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
- LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
- Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port
- SandboxKey string // SandboxKey identifies the sandbox
- SecondaryIPAddresses []network.Address
- SecondaryIPv6Addresses []network.Address
-}
-
-// DefaultNetworkSettings holds network information
-// during the two-release deprecation period.
-// It will be removed in Docker 1.11.
-type DefaultNetworkSettings struct {
- EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox
- Gateway string // Gateway holds the gateway address for the network
- GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address
- GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
- IPAddress string // IPAddress holds the IPv4 address for the network
- IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address
- IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6
- MacAddress string // MacAddress holds the MAC address for the network
-}
-
-// MountPoint represents a mount point configuration inside the container.
-// This is used for reporting the mountpoints in use by a container.
-type MountPoint struct {
- Type mount.Type `json:",omitempty"`
- Name string `json:",omitempty"`
- Source string
- Destination string
- Driver string `json:",omitempty"`
- Mode string
- RW bool
- Propagation mount.Propagation
-}
-
-// Volume represents the configuration of a volume for the remote API
-type Volume struct {
- Name string // Name is the name of the volume
- Driver string // Driver is the Driver name used to create the volume
- Mountpoint string // Mountpoint is the location on disk of the volume
- Status map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume
- Labels map[string]string // Labels is metadata specific to the volume
- Scope string // Scope describes the level at which the volume exists (e.g. `global` for cluster-wide or `local` for machine level)
-}
-
-// VolumesListResponse contains the response for the remote API:
-// GET "/volumes"
-type VolumesListResponse struct {
- Volumes []*Volume // Volumes is the list of volumes being returned
- Warnings []string // Warnings is a list of warnings that occurred when getting the list from the volume drivers
-}
-
-// VolumeCreateRequest contains the request for the remote API:
-// POST "/volumes/create"
-type VolumeCreateRequest struct {
- Name string // Name is the requested name of the volume
- Driver string // Driver is the name of the driver that should be used to create the volume
- DriverOpts map[string]string // DriverOpts holds the driver-specific options to use when creating the volume.
- Labels map[string]string // Labels holds metadata specific to the volume being created.
-}
-
-// NetworkResource is the body of the "get network" http response message
-type NetworkResource struct {
- Name string // Name is the requested name of the network
- ID string `json:"Id"` // ID uniquely identifies a network on a single machine
- Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level)
- Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
- EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
- IPAM network.IPAM // IPAM is the network's IP Address Management
- Internal bool // Internal represents whether the network is internal only
- Attachable bool // Attachable represents whether the network (global scope) can be manually attached to by regular containers from swarm-mode workers.
- Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
- Options map[string]string // Options holds the network-specific options to use when creating the network
- Labels map[string]string // Labels holds metadata specific to the network being created
-}
-
-// EndpointResource contains network resources allocated and used for a container in a network
-type EndpointResource struct {
- Name string
- EndpointID string
- MacAddress string
- IPv4Address string
- IPv6Address string
-}
-
-// NetworkCreate is the expected body of the "create network" http request message
-type NetworkCreate struct {
- CheckDuplicate bool
- Driver string
- EnableIPv6 bool
- IPAM *network.IPAM
- Internal bool
- Attachable bool
- Options map[string]string
- Labels map[string]string
-}
-
-// NetworkCreateRequest is the request message sent to the server for network create call.
-type NetworkCreateRequest struct {
- NetworkCreate
- Name string
-}
-
-// NetworkCreateResponse is the response message sent by the server for network create call
-type NetworkCreateResponse struct {
- ID string `json:"Id"`
- Warning string
-}
-
-// NetworkConnect represents the data to be used to connect a container to the network
-type NetworkConnect struct {
- Container string
- EndpointConfig *network.EndpointSettings `json:",omitempty"`
-}
-
-// NetworkDisconnect represents the data to be used to disconnect a container from the network
-type NetworkDisconnect struct {
- Container string
- Force bool
-}
-
-// Checkpoint represents the details of a checkpoint
-type Checkpoint struct {
- Name string // Name is the name of the checkpoint
-}
-
-// Runtime describes an OCI runtime
-type Runtime struct {
- Path string `json:"path"`
- Args []string `json:"runtimeArgs,omitempty"`
-}
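
These response types decode directly from Remote API JSON; a small sketch against a trimmed, made-up inspect payload:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types"
)

func main() {
	// Trimmed, made-up inspect payload; keys follow the json tags above.
	payload := []byte(`{"Id":"abc123","Name":"/web","State":{"Status":"running","Running":true}}`)

	var c types.ContainerJSON
	if err := json.Unmarshal(payload, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.ID, c.Name, c.State.Status) // abc123 /web running
}
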
diff --git a/vendor/github.com/docker/engine-api/types/versions/README.md b/vendor/github.com/docker/engine-api/types/versions/README.md
deleted file mode 100644
index cdac50a..0000000
--- a/vendor/github.com/docker/engine-api/types/versions/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Legacy API type versions
-
-This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`.
-
-Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since versions below that will use the legacy types in `v1p19`.
-
-### Package name conventions
-
-The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
-
-1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`.
-2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks more awkward: `v1_20.CallFunction`.
-
-For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`.
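
A hypothetical sketch of the convention in code; the v1p20 package and its ContainerJSON type are assumed for illustration only:

// Package example exists only to show the naming convention; the v1p20
// package and its ContainerJSON type are hypothetical here.
package example

import "github.com/docker/engine-api/types/versions/v1p20"

// legacyInspect returns the pre-1.21 shape of an inspect response.
func legacyInspect() v1p20.ContainerJSON {
	return v1p20.ContainerJSON{}
}
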
diff --git a/vendor/github.com/docker/engine-api/types/versions/compare.go b/vendor/github.com/docker/engine-api/types/versions/compare.go
deleted file mode 100644
index 611d4fe..0000000
--- a/vendor/github.com/docker/engine-api/types/versions/compare.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package versions
-
-import (
- "strconv"
- "strings"
-)
-
-// compare compares two version strings
-// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
-func compare(v1, v2 string) int {
- var (
- currTab = strings.Split(v1, ".")
- otherTab = strings.Split(v2, ".")
- )
-
- max := len(currTab)
- if len(otherTab) > max {
- max = len(otherTab)
- }
- for i := 0; i < max; i++ {
- var currInt, otherInt int
-
- if len(currTab) > i {
- currInt, _ = strconv.Atoi(currTab[i])
- }
- if len(otherTab) > i {
- otherInt, _ = strconv.Atoi(otherTab[i])
- }
- if currInt > otherInt {
- return 1
- }
- if otherInt > currInt {
- return -1
- }
- }
- return 0
-}
-
-// LessThan checks if a version is less than another
-func LessThan(v, other string) bool {
- return compare(v, other) == -1
-}
-
-// LessThanOrEqualTo checks if a version is less than or equal to another
-func LessThanOrEqualTo(v, other string) bool {
- return compare(v, other) <= 0
-}
-
-// GreaterThan checks if a version is greater than another
-func GreaterThan(v, other string) bool {
- return compare(v, other) == 1
-}
-
-// GreaterThanOrEqualTo checks if a version is greater than or equal to another
-func GreaterThanOrEqualTo(v, other string) bool {
- return compare(v, other) >= 0
-}
-
-// Equal checks if a version is equal to another
-func Equal(v, other string) bool {
- return compare(v, other) == 0
-}
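
The removed comparison helpers treat versions numerically per dot-separated component rather than lexicographically, so "1.9" sorts below "1.10"; a short usage sketch:

package main

import (
	"fmt"

	"github.com/docker/engine-api/types/versions"
)

func main() {
	fmt.Println(versions.LessThan("1.9", "1.10"))              // true
	fmt.Println(versions.GreaterThanOrEqualTo("1.24", "1.24")) // true
	fmt.Println(versions.Equal("1.22", "1.22.0"))              // true: missing parts count as 0
}
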
diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE
deleted file mode 100644
index b55b37b..0000000
--- a/vendor/github.com/docker/go-connections/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- https://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright 2015 Docker, Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go
deleted file mode 100644
index 4d5f5ae..0000000
--- a/vendor/github.com/docker/go-connections/nat/nat.go
+++ /dev/null
@@ -1,242 +0,0 @@
-// Package nat is a convenience package for manipulation of strings describing network ports.
-package nat
-
-import (
- "fmt"
- "net"
- "strconv"
- "strings"
-)
-
-const (
- // portSpecTemplate is the expected format for port specifications
- portSpecTemplate = "ip:hostPort:containerPort"
-)
-
-// PortBinding represents a binding between a Host IP address and a Host Port
-type PortBinding struct {
- // HostIP is the host IP Address
- HostIP string `json:"HostIp"`
- // HostPort is the host port number
- HostPort string
-}
-
-// PortMap is a collection of PortBinding indexed by Port
-type PortMap map[Port][]PortBinding
-
-// PortSet is a collection of structs indexed by Port
-type PortSet map[Port]struct{}
-
-// Port is a string containing port number and protocol in the format "80/tcp"
-type Port string
-
-// NewPort creates a new instance of a Port given a protocol and port number or port range
-func NewPort(proto, port string) (Port, error) {
- // Check for parsing issues on "port" now so we can avoid having
- // to check it later on.
-
- portStartInt, portEndInt, err := ParsePortRangeToInt(port)
- if err != nil {
- return "", err
- }
-
- if portStartInt == portEndInt {
- return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil
- }
- return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil
-}
-
-// ParsePort parses the port number string and returns an int
-func ParsePort(rawPort string) (int, error) {
- if len(rawPort) == 0 {
- return 0, nil
- }
- port, err := strconv.ParseUint(rawPort, 10, 16)
- if err != nil {
- return 0, err
- }
- return int(port), nil
-}
-
-// ParsePortRangeToInt parses the port range string and returns start/end ints
-func ParsePortRangeToInt(rawPort string) (int, int, error) {
- if len(rawPort) == 0 {
- return 0, 0, nil
- }
- start, end, err := ParsePortRange(rawPort)
- if err != nil {
- return 0, 0, err
- }
- return int(start), int(end), nil
-}
-
-// Proto returns the protocol of a Port
-func (p Port) Proto() string {
- proto, _ := SplitProtoPort(string(p))
- return proto
-}
-
-// Port returns the port number of a Port
-func (p Port) Port() string {
- _, port := SplitProtoPort(string(p))
- return port
-}
-
-// Int returns the port number of a Port as an int
-func (p Port) Int() int {
- portStr := p.Port()
- // We don't need to check for an error because we're going to
- // assume that any error would have been found, and reported, in NewPort()
- port, _ := ParsePort(portStr)
- return port
-}
-
-// Range returns the start/end port numbers of a Port range as ints
-func (p Port) Range() (int, int, error) {
- return ParsePortRangeToInt(p.Port())
-}
-
-// SplitProtoPort splits a port in the format of proto/port
-func SplitProtoPort(rawPort string) (string, string) {
- parts := strings.Split(rawPort, "/")
- l := len(parts)
- if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 {
- return "", ""
- }
- if l == 1 {
- return "tcp", rawPort
- }
- if len(parts[1]) == 0 {
- return "tcp", parts[0]
- }
- return parts[1], parts[0]
-}
-
-func validateProto(proto string) bool {
- for _, availableProto := range []string{"tcp", "udp"} {
- if availableProto == proto {
- return true
- }
- }
- return false
-}
-
-// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses
-// these into the internal types
-func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) {
- var (
- exposedPorts = make(map[Port]struct{}, len(ports))
- bindings = make(map[Port][]PortBinding)
- )
- for _, rawPort := range ports {
- portMappings, err := ParsePortSpec(rawPort)
- if err != nil {
- return nil, nil, err
- }
-
- for _, portMapping := range portMappings {
- port := portMapping.Port
- if _, exists := exposedPorts[port]; !exists {
- exposedPorts[port] = struct{}{}
- }
- bslice, exists := bindings[port]
- if !exists {
- bslice = []PortBinding{}
- }
- bindings[port] = append(bslice, portMapping.Binding)
- }
- }
- return exposedPorts, bindings, nil
-}
-
-// PortMapping is a data object mapping a Port to a PortBinding
-type PortMapping struct {
- Port Port
- Binding PortBinding
-}
-
-func splitParts(rawport string) (string, string, string) {
- parts := strings.Split(rawport, ":")
- n := len(parts)
- containerport := parts[n-1]
-
- switch n {
- case 1:
- return "", "", containerport
- case 2:
- return "", parts[0], containerport
- case 3:
- return parts[0], parts[1], containerport
- default:
- return strings.Join(parts[:n-2], ":"), parts[n-2], containerport
- }
-}
-
-// ParsePortSpec parses a port specification string into a slice of PortMappings
-func ParsePortSpec(rawPort string) ([]PortMapping, error) {
- var proto string
- rawIP, hostPort, containerPort := splitParts(rawPort)
- proto, containerPort = SplitProtoPort(containerPort)
-
- // Strip [] from IPv6 addresses
- ip, _, err := net.SplitHostPort(rawIP + ":")
- if err != nil {
- return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err)
- }
- if ip != "" && net.ParseIP(ip) == nil {
- return nil, fmt.Errorf("Invalid ip address: %s", ip)
- }
- if containerPort == "" {
- return nil, fmt.Errorf("No port specified: %s", rawPort)
- }
-
- startPort, endPort, err := ParsePortRange(containerPort)
- if err != nil {
- return nil, fmt.Errorf("Invalid containerPort: %s", containerPort)
- }
-
- var startHostPort, endHostPort uint64 = 0, 0
- if len(hostPort) > 0 {
- startHostPort, endHostPort, err = ParsePortRange(hostPort)
- if err != nil {
- return nil, fmt.Errorf("Invalid hostPort: %s", hostPort)
- }
- }
-
- if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) {
- // Allow host port range iff containerPort is not a range.
- // In this case, use the host port range as the dynamic
- // host port range to allocate into.
- if endPort != startPort {
- return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort)
- }
- }
-
- if !validateProto(strings.ToLower(proto)) {
- return nil, fmt.Errorf("Invalid proto: %s", proto)
- }
-
- ports := []PortMapping{}
- for i := uint64(0); i <= (endPort - startPort); i++ {
- containerPort = strconv.FormatUint(startPort+i, 10)
- if len(hostPort) > 0 {
- hostPort = strconv.FormatUint(startHostPort+i, 10)
- }
- // Set hostPort to a range only if there is a single container port
- // and a dynamic host port.
- if startPort == endPort && startHostPort != endHostPort {
- hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10))
- }
- port, err := NewPort(strings.ToLower(proto), containerPort)
- if err != nil {
- return nil, err
- }
-
- binding := PortBinding{
- HostIP: ip,
- HostPort: hostPort,
- }
- ports = append(ports, PortMapping{Port: port, Binding: binding})
- }
- return ports, nil
-}
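
A short sketch of the removed port-spec parser in action, using an illustrative spec string:

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// "ip:hostPort:containerPort/proto", per portSpecTemplate above.
	exposed, bindings, err := nat.ParsePortSpecs([]string{"127.0.0.1:8080:80/tcp"})
	if err != nil {
		panic(err)
	}
	for port := range exposed {
		// port is "80/tcp"; the binding carries the host side of the mapping.
		fmt.Println(port.Proto(), port.Port(), bindings[port])
	}
}
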
diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go
deleted file mode 100644
index 892adf8..0000000
--- a/vendor/github.com/docker/go-connections/nat/parse.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package nat
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-// PartParser parses and validates the specified string (data) using the specified template
-// e.g. ip:public:private -> 192.168.0.1:80:8000
-// DEPRECATED: do not use, this function may be removed in a future version
-func PartParser(template, data string) (map[string]string, error) {
- // ip:public:private
- var (
- templateParts = strings.Split(template, ":")
- parts = strings.Split(data, ":")
- out = make(map[string]string, len(templateParts))
- )
- if len(parts) != len(templateParts) {
- return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
- }
-
- for i, t := range templateParts {
- value := ""
- if len(parts) > i {
- value = parts[i]
- }
- out[t] = value
- }
- return out, nil
-}
-
-// ParsePortRange parses and validates the specified string as a port-range (8000-9000)
-func ParsePortRange(ports string) (uint64, uint64, error) {
- if ports == "" {
- return 0, 0, fmt.Errorf("Empty string specified for ports.")
- }
- if !strings.Contains(ports, "-") {
- start, err := strconv.ParseUint(ports, 10, 16)
- end := start
- return start, end, err
- }
-
- parts := strings.Split(ports, "-")
- start, err := strconv.ParseUint(parts[0], 10, 16)
- if err != nil {
- return 0, 0, err
- }
- end, err := strconv.ParseUint(parts[1], 10, 16)
- if err != nil {
- return 0, 0, err
- }
- if end < start {
- return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports)
- }
- return start, end, nil
-}
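
ParsePortRange accepts either a single port or a dash-separated range; a brief sketch with illustrative values:

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// A single port yields identical start and end values.
	start, end, _ := nat.ParsePortRange("8080")
	fmt.Println(start, end) // 8080 8080

	// A dash-separated range is validated: end must not be below start.
	start, end, err := nat.ParsePortRange("8000-9000")
	fmt.Println(start, end, err) // 8000 9000 <nil>
}
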
diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go
deleted file mode 100644
index ce95017..0000000
--- a/vendor/github.com/docker/go-connections/nat/sort.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package nat
-
-import (
- "sort"
- "strings"
-)
-
-type portSorter struct {
- ports []Port
- by func(i, j Port) bool
-}
-
-func (s *portSorter) Len() int {
- return len(s.ports)
-}
-
-func (s *portSorter) Swap(i, j int) {
- s.ports[i], s.ports[j] = s.ports[j], s.ports[i]
-}
-
-func (s *portSorter) Less(i, j int) bool {
- ip := s.ports[i]
- jp := s.ports[j]
-
- return s.by(ip, jp)
-}
-
-// Sort sorts a list of ports using the provided predicate
-// The predicate should compare `i` and `j`, returning true if `i` is
-// considered to be less than `j`.
-func Sort(ports []Port, predicate func(i, j Port) bool) {
- s := &portSorter{ports, predicate}
- sort.Sort(s)
-}
-
-type portMapEntry struct {
- port Port
- binding PortBinding
-}
-
-type portMapSorter []portMapEntry
-
-func (s portMapSorter) Len() int { return len(s) }
-func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-// sort the ports so that the order is:
-// 1. port with larger specified bindings
-// 2. larger port
-// 3. port with tcp protocol
-func (s portMapSorter) Less(i, j int) bool {
- pi, pj := s[i].port, s[j].port
- hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort)
- return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp")
-}
-
-// SortPortMap sorts the list of ports and their respective mappings. Ports
-// with an explicit HostPort will be placed first.
-func SortPortMap(ports []Port, bindings PortMap) {
- s := portMapSorter{}
- for _, p := range ports {
- if binding, ok := bindings[p]; ok {
- for _, b := range binding {
- s = append(s, portMapEntry{port: p, binding: b})
- }
- bindings[p] = []PortBinding{}
- } else {
- s = append(s, portMapEntry{port: p})
- }
- }
-
- sort.Sort(s)
- var (
- i int
- pm = make(map[Port]struct{})
- )
- // reorder ports
- for _, entry := range s {
- if _, ok := pm[entry.port]; !ok {
- ports[i] = entry.port
- pm[entry.port] = struct{}{}
- i++
- }
- // reorder bindings for this port
- if _, ok := bindings[entry.port]; ok {
- bindings[entry.port] = append(bindings[entry.port], entry.binding)
- }
- }
-}
-
-func toInt(s string) uint64 {
- i, _, err := ParsePortRange(s)
- if err != nil {
- i = 0
- }
- return i
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md
deleted file mode 100644
index e69de29..0000000
diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
deleted file mode 100644
index 99846ff..0000000
--- a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package sockets
-
-import (
- "errors"
- "net"
- "sync"
-)
-
-var errClosed = errors.New("use of closed network connection")
-
-// InmemSocket implements net.Listener using in-memory only connections.
-type InmemSocket struct {
- chConn chan net.Conn
- chClose chan struct{}
- addr string
- mu sync.Mutex
-}
-
-// dummyAddr is used to satisfy net.Addr for the in-mem socket
-// it is just stored as a string and returns the string for all calls
-type dummyAddr string
-
-// NewInmemSocket creates an in-memory only net.Listener
-// The addr argument can be any string, but is used to satisfy the `Addr()` part
-// of the net.Listener interface
-func NewInmemSocket(addr string, bufSize int) *InmemSocket {
- return &InmemSocket{
- chConn: make(chan net.Conn, bufSize),
- chClose: make(chan struct{}),
- addr: addr,
- }
-}
-
-// Addr returns the socket's addr string to satisfy net.Listener
-func (s *InmemSocket) Addr() net.Addr {
- return dummyAddr(s.addr)
-}
-
-// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn.
-func (s *InmemSocket) Accept() (net.Conn, error) {
- select {
- case conn := <-s.chConn:
- return conn, nil
- case <-s.chClose:
- return nil, errClosed
- }
-}
-
-// Close closes the listener. It will be unavailable for use once closed.
-func (s *InmemSocket) Close() error {
- s.mu.Lock()
- defer s.mu.Unlock()
- select {
- case <-s.chClose:
- default:
- close(s.chClose)
- }
- return nil
-}
-
-// Dial is used to establish a connection with the in-mem server
-func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) {
- srvConn, clientConn := net.Pipe()
- select {
- case s.chConn <- srvConn:
- case <-s.chClose:
- return nil, errClosed
- }
-
- return clientConn, nil
-}
-
-// Network returns the addr string, satisfies net.Addr
-func (a dummyAddr) Network() string {
- return string(a)
-}
-
-// String returns the string form
-func (a dummyAddr) String() string {
- return string(a)
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go
deleted file mode 100644
index 98e9a1d..0000000
--- a/vendor/github.com/docker/go-connections/sockets/proxy.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package sockets
-
-import (
- "net"
- "net/url"
- "os"
- "strings"
-
- "golang.org/x/net/proxy"
-)
-
-// GetProxyEnv allows access to the uppercase and the lowercase forms of
-// proxy-related variables. See the Go specification for details on these
-// variables. https://golang.org/pkg/net/http/
-func GetProxyEnv(key string) string {
- proxyValue := os.Getenv(strings.ToUpper(key))
- if proxyValue == "" {
- return os.Getenv(strings.ToLower(key))
- }
- return proxyValue
-}
-
-// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a
-// proxy.Dialer which will route the connections through the proxy using the
-// given dialer.
-func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) {
- allProxy := GetProxyEnv("all_proxy")
- if len(allProxy) == 0 {
- return direct, nil
- }
-
- proxyURL, err := url.Parse(allProxy)
- if err != nil {
- return direct, err
- }
-
- proxyFromURL, err := proxy.FromURL(proxyURL, direct)
- if err != nil {
- return direct, err
- }
-
- noProxy := GetProxyEnv("no_proxy")
- if len(noProxy) == 0 {
- return proxyFromURL, nil
- }
-
- perHost := proxy.NewPerHost(proxyFromURL, direct)
- perHost.AddFromString(noProxy)
-
- return perHost, nil
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go
deleted file mode 100644
index a1d7beb..0000000
--- a/vendor/github.com/docker/go-connections/sockets/sockets.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Package sockets provides helper functions to create and configure Unix or TCP sockets.
-package sockets
-
-import (
- "errors"
- "net"
- "net/http"
- "time"
-)
-
-// Why 32? See https://github.com/docker/docker/pull/8035.
-const defaultTimeout = 32 * time.Second
-
-// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system.
-var ErrProtocolNotAvailable = errors.New("protocol not available")
-
-// ConfigureTransport configures the specified Transport according to the
-// specified proto and addr.
-// If the proto is unix (using a unix socket to communicate) or npipe,
-// compression is disabled.
-func ConfigureTransport(tr *http.Transport, proto, addr string) error {
- switch proto {
- case "unix":
- return configureUnixTransport(tr, proto, addr)
- case "npipe":
- return configureNpipeTransport(tr, proto, addr)
- default:
- tr.Proxy = http.ProxyFromEnvironment
- dialer, err := DialerFromEnvironment(&net.Dialer{
- Timeout: defaultTimeout,
- })
- if err != nil {
- return err
- }
- tr.Dial = dialer.Dial
- }
- return nil
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
deleted file mode 100644
index 386cf0d..0000000
--- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build !windows
-
-package sockets
-
-import (
- "fmt"
- "net"
- "net/http"
- "syscall"
- "time"
-)
-
-const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
-
-func configureUnixTransport(tr *http.Transport, proto, addr string) error {
- if len(addr) > maxUnixSocketPathSize {
- return fmt.Errorf("Unix socket path %q is too long", addr)
- }
- // No need for compression in local communications.
- tr.DisableCompression = true
- tr.Dial = func(_, _ string) (net.Conn, error) {
- return net.DialTimeout(proto, addr, defaultTimeout)
- }
- return nil
-}
-
-func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
- return ErrProtocolNotAvailable
-}
-
-// DialPipe connects to a Windows named pipe.
-// This is not supported on other OSes.
-func DialPipe(_ string, _ time.Duration) (net.Conn, error) {
- return nil, syscall.EAFNOSUPPORT
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
deleted file mode 100644
index 5c21644..0000000
--- a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package sockets
-
-import (
- "net"
- "net/http"
- "time"
-
- "github.com/Microsoft/go-winio"
-)
-
-func configureUnixTransport(tr *http.Transport, proto, addr string) error {
- return ErrProtocolNotAvailable
-}
-
-func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
- // No need for compression in local communications.
- tr.DisableCompression = true
- tr.Dial = func(_, _ string) (net.Conn, error) {
- return DialPipe(addr, defaultTimeout)
- }
- return nil
-}
-
-// DialPipe connects to a Windows named pipe.
-func DialPipe(addr string, timeout time.Duration) (net.Conn, error) {
- return winio.DialPipe(addr, &timeout)
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
deleted file mode 100644
index 53cbb6c..0000000
--- a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Package sockets provides helper functions to create and configure Unix or TCP sockets.
-package sockets
-
-import (
- "crypto/tls"
- "net"
-)
-
-// NewTCPSocket creates a TCP socket listener with the specified address and
-// the specified tls configuration. If tlsConfig is set, it wraps the
-// TCP listener in a TLS one.
-func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) {
- l, err := net.Listen("tcp", addr)
- if err != nil {
- return nil, err
- }
- if tlsConfig != nil {
- tlsConfig.NextProtos = []string{"http/1.1"}
- l = tls.NewListener(l, tlsConfig)
- }
- return l, nil
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go
deleted file mode 100644
index d162734..0000000
--- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// +build linux freebsd solaris
-
-package sockets
-
-import (
- "fmt"
- "net"
- "os"
- "strconv"
- "syscall"
-
- "github.com/Sirupsen/logrus"
- "github.com/opencontainers/runc/libcontainer/user"
-)
-
-// NewUnixSocket creates a unix socket with the specified path and group.
-func NewUnixSocket(path, group string) (net.Listener, error) {
- if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
- return nil, err
- }
- mask := syscall.Umask(0777)
- defer syscall.Umask(mask)
- l, err := net.Listen("unix", path)
- if err != nil {
- return nil, err
- }
- if err := setSocketGroup(path, group); err != nil {
- l.Close()
- return nil, err
- }
- if err := os.Chmod(path, 0660); err != nil {
- l.Close()
- return nil, err
- }
- return l, nil
-}
-
-func setSocketGroup(path, group string) error {
- if group == "" {
- return nil
- }
- if err := changeGroup(path, group); err != nil {
- if group != "docker" {
- return err
- }
- logrus.Debugf("Warning: could not change group %s to docker: %v", path, err)
- }
- return nil
-}
-
-func changeGroup(path string, nameOrGid string) error {
- gid, err := lookupGidByName(nameOrGid)
- if err != nil {
- return err
- }
- logrus.Debugf("%s group found. gid: %d", nameOrGid, gid)
- return os.Chown(path, 0, gid)
-}
-
-func lookupGidByName(nameOrGid string) (int, error) {
- groupFile, err := user.GetGroupPath()
- if err != nil {
- return -1, err
- }
- groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool {
- return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid
- })
- if err != nil {
- return -1, err
- }
- if groups != nil && len(groups) > 0 {
- return groups[0].Gid, nil
- }
- gid, err := strconv.Atoi(nameOrGid)
- if err == nil {
- logrus.Warnf("Could not find GID %d", gid)
- return gid, nil
- }
- return -1, fmt.Errorf("Group %s not found", nameOrGid)
-}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
deleted file mode 100644
index 352d342..0000000
--- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// +build go1.7
-
-package tlsconfig
-
-import (
- "crypto/x509"
- "runtime"
-
- "github.com/Sirupsen/logrus"
-)
-
-// SystemCertPool returns a copy of the system cert pool; on Windows it
-// falls back to an empty pool if the system pool fails to load.
-func SystemCertPool() (*x509.CertPool, error) {
- certpool, err := x509.SystemCertPool()
- if err != nil && runtime.GOOS == "windows" {
- logrus.Warnf("Unable to use system certificate pool: %v", err)
- return x509.NewCertPool(), nil
- }
- return certpool, err
-}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
deleted file mode 100644
index 262c95e..0000000
--- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build !go1.7
-
-package tlsconfig
-
-import (
- "crypto/x509"
-
- "github.com/Sirupsen/logrus"
-)
-
-// SystemCertPool returns a new empty cert pool;
-// accessing the system cert pool requires go 1.7 or later.
-func SystemCertPool() (*x509.CertPool, error) {
- logrus.Warn("Unable to use system certificate pool: requires building with go 1.7 or later")
- return x509.NewCertPool(), nil
-}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go
deleted file mode 100644
index 8bbffcf..0000000
--- a/vendor/github.com/docker/go-connections/tlsconfig/config.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
-//
-// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
-// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
-// A Config may be reused; the tls package will also not modify it.
-package tlsconfig
-
-import (
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "io/ioutil"
- "os"
-
- "github.com/Sirupsen/logrus"
-)
-
-// Options represents the information needed to create client and server TLS configurations.
-type Options struct {
- CAFile string
-
- // If either CertFile or KeyFile is empty, Client() will not load them
- // preventing the client from authenticating to the server.
- // However, Server() requires them and will error out if they are empty.
- CertFile string
- KeyFile string
-
- // client-only option
- InsecureSkipVerify bool
- // server-only option
- ClientAuth tls.ClientAuthType
-}
-
-// Extra (server-side) accepted CBC cipher suites - will phase out in the future
-var acceptedCBCCiphers = []uint16{
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_RSA_WITH_AES_256_CBC_SHA,
- tls.TLS_RSA_WITH_AES_128_CBC_SHA,
-}
-
-// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
-// options struct but wants to use a commonly accepted set of TLS cipher suites, with
-// known weak algorithms removed.
-var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
-
-// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration.
-func ServerDefault() *tls.Config {
- return &tls.Config{
- // Avoid fallback to SSL protocols < TLS1.0
- MinVersion: tls.VersionTLS10,
- PreferServerCipherSuites: true,
- CipherSuites: DefaultServerAcceptedCiphers,
- }
-}
-
-// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration.
-func ClientDefault() *tls.Config {
- return &tls.Config{
- // Prefer TLS1.2 as the client minimum
- MinVersion: tls.VersionTLS12,
- CipherSuites: clientCipherSuites,
- }
-}
-
-// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
-func certPool(caFile string) (*x509.CertPool, error) {
- // If we should verify the server, we need to load a trusted ca
- certPool, err := SystemCertPool()
- if err != nil {
- return nil, fmt.Errorf("failed to read system certificates: %v", err)
- }
- pem, err := ioutil.ReadFile(caFile)
- if err != nil {
- return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err)
- }
- if !certPool.AppendCertsFromPEM(pem) {
- return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
- }
- logrus.Debugf("Trusting %d certs", len(certPool.Subjects()))
- return certPool, nil
-}
-
-// Client returns a TLS configuration meant to be used by a client.
-func Client(options Options) (*tls.Config, error) {
- tlsConfig := ClientDefault()
- tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
- if !options.InsecureSkipVerify && options.CAFile != "" {
- CAs, err := certPool(options.CAFile)
- if err != nil {
- return nil, err
- }
- tlsConfig.RootCAs = CAs
- }
-
- if options.CertFile != "" || options.KeyFile != "" {
- tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
- if err != nil {
- return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err)
- }
- tlsConfig.Certificates = []tls.Certificate{tlsCert}
- }
-
- return tlsConfig, nil
-}
-
-// Server returns a TLS configuration meant to be used by a server.
-func Server(options Options) (*tls.Config, error) {
- tlsConfig := ServerDefault()
- tlsConfig.ClientAuth = options.ClientAuth
- tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
- }
- return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err)
- }
- tlsConfig.Certificates = []tls.Certificate{tlsCert}
- if options.ClientAuth >= tls.VerifyClientCertIfGiven {
- CAs, err := certPool(options.CAFile)
- if err != nil {
- return nil, err
- }
- tlsConfig.ClientCAs = CAs
- }
- return tlsConfig, nil
-}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
deleted file mode 100644
index 6b4c6a7..0000000
--- a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build go1.5
-
-// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
-//
-package tlsconfig
-
-import (
- "crypto/tls"
-)
-
-// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
-var clientCipherSuites = []uint16{
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
-}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
deleted file mode 100644
index ee22df4..0000000
--- a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build !go1.5
-
-// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
-//
-package tlsconfig
-
-import (
- "crypto/tls"
-)
-
-// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
-var clientCipherSuites = []uint16{
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
-}
diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md
deleted file mode 100644
index 9ea86d7..0000000
--- a/vendor/github.com/docker/go-units/CONTRIBUTING.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# Contributing to go-units
-
-Want to hack on go-units? Awesome! Here are instructions to get you started.
-
-go-units is a part of the [Docker](https://www.docker.com) project, and follows
-the same rules and principles. If you're already familiar with the way
-Docker does things, you'll feel right at home.
-
-Otherwise, go read Docker's
-[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
-[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
-[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
-[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
-
-### Sign your work
-
-The sign-off is a simple line at the end of the explanation for the patch. Your
-signature certifies that you wrote the patch or otherwise have the right to pass
-it on as an open-source patch. The rules are pretty simple: if you can certify
-the below (from [developercertificate.org](http://developercertificate.org/)):
-
-```
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
-```
-
-Then you just add a line to every git commit message:
-
- Signed-off-by: Joe Smith
-
-Use your real name (sorry, no pseudonyms or anonymous contributions.)
-
-If you set your `user.name` and `user.email` git configs, you can sign your
-commit automatically with `git commit -s`.
diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE
deleted file mode 100644
index b55b37b..0000000
--- a/vendor/github.com/docker/go-units/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- https://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright 2015 Docker, Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS
deleted file mode 100644
index 477be8b..0000000
--- a/vendor/github.com/docker/go-units/MAINTAINERS
+++ /dev/null
@@ -1,27 +0,0 @@
-# go-units maintainers file
-#
-# This file describes who runs the docker/go-units project and how.
-# This is a living document - if you see something out of date or missing, speak up!
-#
-# It is structured to be consumable by both humans and programs.
-# To extract its contents programmatically, use any TOML-compliant parser.
-#
-# This file is compiled into the MAINTAINERS file in docker/opensource.
-#
-[Org]
- [Org."Core maintainers"]
- people = [
- "calavera",
- ]
-
-[people]
-
-# A reference list of all people associated with the project.
-# All other sections should refer to people by their canonical key
-# in the people section.
-
- # ADD YOURSELF HERE IN ALPHABETICAL ORDER
- [people.calavera]
- Name = "David Calavera"
- Email = "david.calavera@gmail.com"
- GitHub = "calavera"
diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md
deleted file mode 100644
index 4f70a4e..0000000
--- a/vendor/github.com/docker/go-units/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-[GoDoc](https://godoc.org/github.com/docker/go-units)
-
-# Introduction
-
-go-units is a library to transform human friendly measurements into machine friendly values.
-
-## Usage
-
-See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
-
-## Copyright and license
-
-Copyright © 2015 Docker, Inc.
-
-go-units is licensed under the Apache License, Version 2.0.
-See [LICENSE](LICENSE) for the full text of the license.
diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml
deleted file mode 100644
index 9043b35..0000000
--- a/vendor/github.com/docker/go-units/circle.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-dependencies:
- post:
- # install golint
- - go get github.com/golang/lint/golint
-
-test:
- pre:
- # run analysis before tests
- - go vet ./...
- - test -z "$(golint ./... | tee /dev/stderr)"
- - test -z "$(gofmt -s -l . | tee /dev/stderr)"
diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go
deleted file mode 100644
index ba02af2..0000000
--- a/vendor/github.com/docker/go-units/duration.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Package units provides helper functions to parse and print size and time units
-// in human-readable format.
-package units
-
-import (
- "fmt"
- "time"
-)
-
-// HumanDuration returns a human-readable approximation of a duration
-// (eg. "About a minute", "4 hours ago", etc.).
-func HumanDuration(d time.Duration) string {
- if seconds := int(d.Seconds()); seconds < 1 {
- return "Less than a second"
- } else if seconds == 1 {
- return "1 second"
- } else if seconds < 60 {
- return fmt.Sprintf("%d seconds", seconds)
- } else if minutes := int(d.Minutes()); minutes == 1 {
- return "About a minute"
- } else if minutes < 46 {
- return fmt.Sprintf("%d minutes", minutes)
- } else if hours := int(d.Hours() + 0.5); hours == 1 {
- return "About an hour"
- } else if hours < 48 {
- return fmt.Sprintf("%d hours", hours)
- } else if hours < 24*7*2 {
- return fmt.Sprintf("%d days", hours/24)
- } else if hours < 24*30*2 {
- return fmt.Sprintf("%d weeks", hours/24/7)
- } else if hours < 24*365*2 {
- return fmt.Sprintf("%d months", hours/24/30)
- }
- return fmt.Sprintf("%d years", int(d.Hours())/24/365)
-}
diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go
deleted file mode 100644
index b6485ed..0000000
--- a/vendor/github.com/docker/go-units/size.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package units
-
-import (
- "fmt"
- "regexp"
- "strconv"
- "strings"
-)
-
-// See: http://en.wikipedia.org/wiki/Binary_prefix
-const (
- // Decimal
-
- KB = 1000
- MB = 1000 * KB
- GB = 1000 * MB
- TB = 1000 * GB
- PB = 1000 * TB
-
- // Binary
-
- KiB = 1024
- MiB = 1024 * KiB
- GiB = 1024 * MiB
- TiB = 1024 * GiB
- PiB = 1024 * TiB
-)
-
-type unitMap map[string]int64
-
-var (
- decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
- binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
- sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)
-)
-
-var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
-var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
-
-func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) {
- i := 0
- unitsLimit := len(_map) - 1
- for size >= base && i < unitsLimit {
- size = size / base
- i++
- }
- return size, _map[i]
-}
-
-// CustomSize returns a human-readable approximation of a size
-// using custom format.
-func CustomSize(format string, size float64, base float64, _map []string) string {
- size, unit := getSizeAndUnit(size, base, _map)
- return fmt.Sprintf(format, size, unit)
-}
-
-// HumanSizeWithPrecision allows the size to be in any precision,
-// instead of 4 digit precision used in units.HumanSize.
-func HumanSizeWithPrecision(size float64, precision int) string {
- size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs)
- return fmt.Sprintf("%.*g %s", precision, size, unit)
-}
-
-// HumanSize returns a human-readable approximation of a size
-// capped at 4 valid numbers (eg. "2.746 MB", "796 KB").
-func HumanSize(size float64) string {
- return HumanSizeWithPrecision(size, 4)
-}
-
-// BytesSize returns a human-readable size in bytes, kibibytes,
-// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB").
-func BytesSize(size float64) string {
- return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
-}
-
-// FromHumanSize returns an integer from a human-readable specification of a
-// size using SI standard (eg. "44kB", "17MB").
-func FromHumanSize(size string) (int64, error) {
- return parseSize(size, decimalMap)
-}
-
-// RAMInBytes parses a human-readable string representing an amount of RAM
-// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
-// returns the number of bytes, or -1 if the string is unparseable.
-// Units are case-insensitive, and the 'b' suffix is optional.
-func RAMInBytes(size string) (int64, error) {
- return parseSize(size, binaryMap)
-}
-
-// Parses the human-readable size string into the amount it represents.
-func parseSize(sizeStr string, uMap unitMap) (int64, error) {
- matches := sizeRegex.FindStringSubmatch(sizeStr)
- if len(matches) != 4 {
- return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
- }
-
- size, err := strconv.ParseFloat(matches[1], 64)
- if err != nil {
- return -1, err
- }
-
- unitPrefix := strings.ToLower(matches[3])
- if mul, ok := uMap[unitPrefix]; ok {
- size *= float64(mul)
- }
-
- return int64(size), nil
-}
diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go
deleted file mode 100644
index 5ac7fd8..0000000
--- a/vendor/github.com/docker/go-units/ulimit.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package units
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-// Ulimit is a human friendly version of Rlimit.
-type Ulimit struct {
- Name string
- Hard int64
- Soft int64
-}
-
-// Rlimit specifies the resource limits, such as max open files.
-type Rlimit struct {
- Type int `json:"type,omitempty"`
- Hard uint64 `json:"hard,omitempty"`
- Soft uint64 `json:"soft,omitempty"`
-}
-
-const (
- // magic numbers for making the syscall
- // some of these are defined in the syscall package, but not all.
- // Also since Windows client doesn't get access to the syscall package, need to
- // define these here
- rlimitAs = 9
- rlimitCore = 4
- rlimitCPU = 0
- rlimitData = 2
- rlimitFsize = 1
- rlimitLocks = 10
- rlimitMemlock = 8
- rlimitMsgqueue = 12
- rlimitNice = 13
- rlimitNofile = 7
- rlimitNproc = 6
- rlimitRss = 5
- rlimitRtprio = 14
- rlimitRttime = 15
- rlimitSigpending = 11
- rlimitStack = 3
-)
-
-var ulimitNameMapping = map[string]int{
- //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
- "core": rlimitCore,
- "cpu": rlimitCPU,
- "data": rlimitData,
- "fsize": rlimitFsize,
- "locks": rlimitLocks,
- "memlock": rlimitMemlock,
- "msgqueue": rlimitMsgqueue,
- "nice": rlimitNice,
- "nofile": rlimitNofile,
- "nproc": rlimitNproc,
- "rss": rlimitRss,
- "rtprio": rlimitRtprio,
- "rttime": rlimitRttime,
- "sigpending": rlimitSigpending,
- "stack": rlimitStack,
-}
-
-// ParseUlimit parses and returns a Ulimit from the specified string.
-func ParseUlimit(val string) (*Ulimit, error) {
- parts := strings.SplitN(val, "=", 2)
- if len(parts) != 2 {
- return nil, fmt.Errorf("invalid ulimit argument: %s", val)
- }
-
- if _, exists := ulimitNameMapping[parts[0]]; !exists {
- return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
- }
-
- var (
- soft int64
- hard = &soft // default to soft in case no hard was set
- temp int64
- err error
- )
- switch limitVals := strings.Split(parts[1], ":"); len(limitVals) {
- case 2:
- temp, err = strconv.ParseInt(limitVals[1], 10, 64)
- if err != nil {
- return nil, err
- }
- hard = &temp
- fallthrough
- case 1:
- soft, err = strconv.ParseInt(limitVals[0], 10, 64)
- if err != nil {
- return nil, err
- }
- default:
- return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
- }
-
- if soft > *hard {
- return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
- }
-
- return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
-}
-
-// GetRlimit returns the RLimit corresponding to Ulimit.
-func (u *Ulimit) GetRlimit() (*Rlimit, error) {
- t, exists := ulimitNameMapping[u.Name]
- if !exists {
- return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
- }
-
- return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
-}
-
-func (u *Ulimit) String() string {
- return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
-}
diff --git a/vendor/github.com/jcelliott/lumber/LICENSE b/vendor/github.com/jcelliott/lumber/LICENSE
deleted file mode 100644
index 3cdedce..0000000
--- a/vendor/github.com/jcelliott/lumber/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License
-
-Copyright (c) 2013 Joshua Elliott
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/jcelliott/lumber/README.md b/vendor/github.com/jcelliott/lumber/README.md
deleted file mode 100644
index f6e8926..0000000
--- a/vendor/github.com/jcelliott/lumber/README.md
+++ /dev/null
@@ -1,71 +0,0 @@
-lumber
-======
-
-A simple logger for Go.
-
-Provides console and file loggers that support 6 log levels. The file logger supports log backup and
-rotation.
-
-### Usage: ###
-Log to the default (console) logger
-
-```go
-lumber.Error("An error message")
-```
-
-Create a new console logger that only logs messages of level WARN or higher
-
-```go
-log := lumber.NewConsoleLogger(lumber.WARN)
-```
-
-Change the log level for a logger
-
-```go
-log.Level(lumber.INFO)
-```
-
-Create a new file logger that rotates at 5000 lines (up to 9 backups) with a 100 message buffer
-
-```go
-log := lumber.NewFileLogger("filename.log", lumber.INFO, lumber.ROTATE, 5000, 9, 100)
-// or
-log := lumber.NewRotateLogger("filename.log", 5000, 9)
-```
-
-Send messages to the log
-
-```go
-// the log methods use fmt.Printf() syntax
-log.Trace("the %s log level", "lowest")
-log.Debug("")
-log.Info("the default log level")
-log.Warn("")
-log.Error("")
-log.Fatal("the %s log level", "highest")
-```
-
-Add a prefix to label different logs
-
-```go
-log.Prefix("MYAPP")
-```
-
-Use a MultiLogger
-
-```go
-mlog := NewMultiLogger()
-mlog.AddLoggers(log1, log2)
-mlog.Warn("This message goes to multiple loggers")
-mlog.Close() // closes all loggers
-```
-
-### Modes: ###
-
-APPEND: Append if the file exists, otherwise create a new file
-
-TRUNC: Open and truncate the file, regardless of whether it already exists
-
-BACKUP: Rotate the log every time a new logger is created
-
-ROTATE: Append if the file exists, when the log reaches maxLines rotate files
diff --git a/vendor/github.com/jcelliott/lumber/consolelog.go b/vendor/github.com/jcelliott/lumber/consolelog.go
deleted file mode 100644
index 1fbae7d..0000000
--- a/vendor/github.com/jcelliott/lumber/consolelog.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package lumber
-
-import (
- "fmt"
- "io"
- "os"
- "time"
-)
-
-type ConsoleLogger struct {
- out io.WriteCloser
- outLevel int
- timeFormat string
- prefix string
- levels []string
- closed bool
-}
-
-// Create a new console logger with output level o, and an empty prefix
-func NewConsoleLogger(o int) *ConsoleLogger {
- return &ConsoleLogger{
- out: os.Stdout,
- outLevel: o,
- timeFormat: TIMEFORMAT,
- prefix: "",
- levels: levels,
- }
-}
-
-func NewBasicLogger(f io.WriteCloser, level int) *ConsoleLogger {
- return &ConsoleLogger{
- out: f,
- outLevel: level,
- timeFormat: TIMEFORMAT,
- prefix: "",
- levels: levels,
- }
-}
-
-// Generic output function. If msg does not end with a newline, one will be appended.
-func (l *ConsoleLogger) output(msg *Message) {
- buf := []byte{}
- buf = append(buf, msg.time.Format(l.timeFormat)...)
- if l.prefix != "" {
- buf = append(buf, ' ')
- buf = append(buf, l.prefix...)
- }
- buf = append(buf, ' ')
- buf = append(buf, l.levels[msg.level]...)
- buf = append(buf, ' ')
- buf = append(buf, msg.m...)
- if len(msg.m) > 0 && msg.m[len(msg.m)-1] != '\n' {
- buf = append(buf, '\n')
- }
- l.out.Write(buf)
-}
-
-// Sets the available levels for this logger
-func (l *ConsoleLogger) SetLevels(lvls []string) {
- if lvls[len(lvls)-1] != "*LOG*" {
- lvls = append(lvls, "*LOG*")
- }
- l.levels = lvls
-}
-
-// Sets the output level for this logger
-func (l *ConsoleLogger) Level(o int) {
- if o >= 0 && o <= len(l.levels)-1 {
- l.outLevel = o
- }
-}
-
-// Sets the prefix for this logger
-func (l *ConsoleLogger) Prefix(p string) {
- l.prefix = p
-}
-
-// Sets the time format for this logger
-func (l *ConsoleLogger) TimeFormat(f string) {
- l.timeFormat = f
-}
-
-// Close the logger
-func (l *ConsoleLogger) Close() {
- l.closed = true
- l.output(&Message{len(l.levels) - 1, "Closing log now", time.Now()})
- l.out.Close()
-}
-
-func (l *ConsoleLogger) log(lvl int, format string, v ...interface{}) {
- if lvl < l.outLevel || l.closed {
- return
- }
- // recover in case the channel has already been closed (unlikely race condition)
- // this could also be solved with a lock, but would cause a performance hit
- defer recover()
- l.output(&Message{lvl, fmt.Sprintf(format, v...), time.Now()})
-}
-
-// Logging functions
-func (l *ConsoleLogger) Fatal(format string, v ...interface{}) {
- l.log(FATAL, format, v...)
-}
-
-func (l *ConsoleLogger) Error(format string, v ...interface{}) {
- l.log(ERROR, format, v...)
-}
-
-func (l *ConsoleLogger) Warn(format string, v ...interface{}) {
- l.log(WARN, format, v...)
-}
-
-func (l *ConsoleLogger) Info(format string, v ...interface{}) {
- l.log(INFO, format, v...)
-}
-
-func (l *ConsoleLogger) Debug(format string, v ...interface{}) {
- l.log(DEBUG, format, v...)
-}
-
-func (l *ConsoleLogger) Trace(format string, v ...interface{}) {
- l.log(TRACE, format, v...)
-}
-
-func (l *ConsoleLogger) Print(lvl int, v ...interface{}) {
- l.output(&Message{lvl, fmt.Sprint(v...), time.Now()})
-}
-
-func (l *ConsoleLogger) Printf(lvl int, format string, v ...interface{}) {
- l.output(&Message{lvl, fmt.Sprintf(format, v...), time.Now()})
-}
-
-func (l *ConsoleLogger) GetLevel() int {
- return l.outLevel
-}
-
-func (l *ConsoleLogger) IsFatal() bool {
- return l.outLevel <= FATAL
-}
-
-func (l *ConsoleLogger) IsError() bool {
- return l.outLevel <= ERROR
-}
-
-func (l *ConsoleLogger) IsWarn() bool {
- return l.outLevel <= WARN
-}
-
-func (l *ConsoleLogger) IsInfo() bool {
- return l.outLevel <= INFO
-}
-
-func (l *ConsoleLogger) IsDebug() bool {
- return l.outLevel <= DEBUG
-}
-
-func (l *ConsoleLogger) IsTrace() bool {
- return l.outLevel <= TRACE
-}
diff --git a/vendor/github.com/jcelliott/lumber/filelog.go b/vendor/github.com/jcelliott/lumber/filelog.go
deleted file mode 100644
index e13e877..0000000
--- a/vendor/github.com/jcelliott/lumber/filelog.go
+++ /dev/null
@@ -1,362 +0,0 @@
-package lumber
-
-import (
- "bufio"
- "fmt"
- "os"
- "path/filepath"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-const (
- // mode constants
- APPEND = iota
- TRUNC
- BACKUP
- ROTATE
-)
-
-const (
- BUFSIZE = 100
-)
-
-type FileLogger struct {
- queue chan *Message
- done chan bool
- out *os.File
- timeFormat, prefix string
- outLevel, maxLines, curLines, maxRotate, mode int
- closed, errored bool
- levels []string
-}
-
-// Convenience function to create a new append-only logger
-func NewAppendLogger(f string) (*FileLogger, error) {
- return NewFileLogger(f, INFO, APPEND, 0, 0, BUFSIZE)
-}
-
-// Convenience function to create a new truncating logger
-func NewTruncateLogger(f string) (*FileLogger, error) {
- return NewFileLogger(f, INFO, TRUNC, 0, 0, BUFSIZE)
-}
-
-// Convenience function to create a new backup logger
-func NewBackupLogger(f string, maxBackup int) (*FileLogger, error) {
- return NewFileLogger(f, INFO, BACKUP, 0, maxBackup, BUFSIZE)
-}
-
-// Convenience function to create a new rotating logger
-func NewRotateLogger(f string, maxLines, maxRotate int) (*FileLogger, error) {
- return NewFileLogger(f, INFO, ROTATE, maxLines, maxRotate, BUFSIZE)
-}
-
-// Creates a new FileLogger with filename f, output level o, and an empty prefix.
-// Modes are described in the documentation; maxLines and maxRotate are only significant
-// for some modes.
-func NewFileLogger(f string, o, mode, maxLines, maxRotate, bufsize int) (*FileLogger, error) {
- var file *os.File
- var err error
-
- switch mode {
- case APPEND:
- // open log file, append if it already exists
- file, err = os.OpenFile(f, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
- case TRUNC:
- // just truncate file and start logging
- file, err = os.OpenFile(f, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
- case BACKUP:
- // rotate every time a new logger is created
- file, err = openBackup(f, 0, maxRotate)
- case ROTATE:
- // "normal" rotation, when file reaches line limit
- file, err = openBackup(f, maxLines, maxRotate)
- default:
- return nil, fmt.Errorf("Invalid mode parameter: %d", mode)
- }
- if err != nil {
- return nil, fmt.Errorf("Error creating logger: %s", err)
- }
-
- return newFileLogger(file, o, mode, maxLines, maxRotate, bufsize), nil
-}
-
-func NewBasicFileLogger(f *os.File, level int) (l *FileLogger) {
- return newFileLogger(f, level, 0, 0, 0, BUFSIZE)
-}
-
-func newFileLogger(f *os.File, o, mode, maxLines, maxRotate, bufsize int) (l *FileLogger) {
- l = &FileLogger{
- queue: make(chan *Message, bufsize),
- done: make(chan bool),
- out: f,
- outLevel: o,
- timeFormat: TIMEFORMAT,
- prefix: "",
- maxLines: maxLines,
- maxRotate: maxRotate,
- mode: mode,
- levels: levels,
- }
-
- if mode == ROTATE {
- // get the current line count if relevant
- l.curLines = countLines(l.out)
- }
-
- go l.startOutput()
- return
-}
-
-func (l *FileLogger) startOutput() {
- for {
- m, ok := <-l.queue
- if !ok {
- // the channel is closed and empty
- l.printLog(&Message{len(l.levels) - 1, fmt.Sprintf("Closing log now"), time.Now()})
- l.out.Sync()
- if err := l.out.Close(); err != nil {
- l.printLog(&Message{len(l.levels) - 1, fmt.Sprintf("Error closing log file: %s", err), time.Now()})
- }
- l.done <- true
- return
- }
- l.output(m)
- }
-}
-
-// Attempt to create new log. Specific behavior depends on the maxLines setting
-func openBackup(f string, maxLines, maxRotate int) (*os.File, error) {
- // first try to open the file with O_EXCL (file must not already exist)
- file, err := os.OpenFile(f, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0644)
- // if there are no errors (it's a new file), we can just use this file
- if err == nil {
- return file, nil
- }
- // if the error wasn't an 'Exist' error, we've got a problem
- if !os.IsExist(err) {
- return nil, fmt.Errorf("Error opening file for logging: %s", err)
- }
-
- if maxLines == 0 {
- // we're in backup mode, rotate and return the new file
- return doRotate(f, maxRotate)
- }
-
- // the file already exists, open it
- return os.OpenFile(f, os.O_RDWR|os.O_APPEND, 0644)
-}
-
-// Rotate the logs
-func (l *FileLogger) rotate() error {
- oldFile := l.out
- file, err := doRotate(l.out.Name(), l.maxRotate)
- if err != nil {
- return fmt.Errorf("Error rotating logs: %s", err)
- }
- l.curLines = 0
- l.out = file
- oldFile.Close()
- return nil
-}
-
-// Rotate all the logs and return a file with newly vacated filename
-// Rename 'log.name' to 'log.name.1' and 'log.name.1' to 'log.name.2' etc
-func doRotate(f string, limit int) (*os.File, error) {
- // create a format string with the correct amount of zero-padding for the limit
- numFmt := fmt.Sprintf(".%%0%dd", len(fmt.Sprintf("%d", limit)))
- // get all rotated files and sort them in reverse order
- list, err := filepath.Glob(fmt.Sprintf("%s.*", f))
- if err != nil {
- return nil, fmt.Errorf("Error rotating logs: %s", err)
- }
- sort.Sort(sort.Reverse(sort.StringSlice(list)))
- for _, file := range list {
- parts := strings.Split(file, ".")
- numPart := parts[len(parts)-1]
- num, err := strconv.Atoi(numPart)
- if err != nil {
- // not a number, don't rotate it
- continue
- }
- if num >= limit {
- // we're at the limit, don't rotate it
- continue
- }
- newName := fmt.Sprintf(strings.Join(parts[:len(parts)-1], ".")+numFmt, num+1)
- // don't check error because there's nothing we can do
- os.Rename(file, newName)
- }
- if err = os.Rename(f, fmt.Sprintf(f+numFmt, 1)); err != nil {
- if !os.IsNotExist(err) {
- return nil, fmt.Errorf("Error rotating logs: %s", err)
- }
- }
- return os.OpenFile(f, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
-}
-
-// Generic output function. Outputs messages if they are higher level than outLevel for this
-// specific logger. If msg does not end with a newline, one will be appended.
-func (l *FileLogger) output(msg *Message) {
- if l.mode == ROTATE && l.curLines >= l.maxLines && !l.errored {
- err := l.rotate()
- if err != nil {
- // if we can't rotate the logs, we should stop logging to prevent the log file from growing
- // past the limit and continuously retrying the rotate operation (but log current msg first)
- l.printLog(msg)
- l.printLog(&Message{len(l.levels) - 1, fmt.Sprintf("Error rotating logs: %s. Closing log."), time.Now()})
- l.errored = true
- l.close()
- }
- }
- l.printLog(msg)
-}
-
-func (l *FileLogger) printLog(msg *Message) {
- buf := []byte{}
- buf = append(buf, msg.time.Format(l.timeFormat)...)
- if l.prefix != "" {
- buf = append(buf, ' ')
- buf = append(buf, l.prefix...)
- }
- buf = append(buf, ' ')
- buf = append(buf, l.levels[msg.level]...)
- buf = append(buf, ' ')
- buf = append(buf, msg.m...)
- if len(msg.m) > 0 && msg.m[len(msg.m)-1] != '\n' {
- buf = append(buf, '\n')
- }
- l.curLines += 1
- l.out.Write(buf)
-}
-
-// Sets the available levels for this logger
-// TODO: append a *LOG* level
-func (l *FileLogger) SetLevels(lvls []string) {
- if lvls[len(lvls)-1] != "*LOG*" {
- lvls = append(lvls, "*LOG*")
- }
- l.levels = lvls
-}
-
-// Sets the output level for this logger
-func (l *FileLogger) Level(o int) {
- if o >= 0 && o <= len(l.levels)-1 {
- l.outLevel = o
- }
-}
-
-// Sets the prefix for this logger
-func (l *FileLogger) Prefix(p string) {
- l.prefix = p
-}
-
-// Sets the time format for this logger
-func (l *FileLogger) TimeFormat(f string) {
- l.timeFormat = f
-}
-
-// Flush the messages in the queue and shut down the logger.
-func (l *FileLogger) close() {
- l.closed = true
- // closing the channel will signal the goroutine to finish writing messages in the queue
- // and then shut down by sync'ing and close'ing the file.
- close(l.queue)
-}
-
-// Flush the messages in the queue and shut down the logger.
-func (l *FileLogger) Close() {
- l.close()
- <-l.done
-}
-
-// return the number of lines in the given file
-func countLines(f *os.File) int {
- r := bufio.NewReader(f)
- count := 0
- var err error
- for err == nil {
- var prefix bool
- _, prefix, err = r.ReadLine()
- // ReadLine may return only a fragment of a long line (prefix == true);
- // count a line only once its final fragment is read without error
- if !prefix && err == nil {
- count++
- }
- }
- return count
-}
-
-func (l *FileLogger) log(lvl int, format string, v ...interface{}) {
- if lvl < l.outLevel || l.closed {
- return
- }
- // recover in case the channel has already been closed (unlikely race condition)
- // this could also be solved with a lock, but would cause a performance hit
- defer recover()
- l.queue <- &Message{lvl, fmt.Sprintf(format, v...), time.Now()}
-}
-
-// Logging functions
-func (l *FileLogger) Fatal(format string, v ...interface{}) {
- l.log(FATAL, format, v...)
-}
-
-func (l *FileLogger) Error(format string, v ...interface{}) {
- l.log(ERROR, format, v...)
-}
-
-func (l *FileLogger) Warn(format string, v ...interface{}) {
- l.log(WARN, format, v...)
-}
-
-func (l *FileLogger) Info(format string, v ...interface{}) {
- l.log(INFO, format, v...)
-}
-
-func (l *FileLogger) Debug(format string, v ...interface{}) {
- l.log(DEBUG, format, v...)
-}
-
-func (l *FileLogger) Trace(format string, v ...interface{}) {
- l.log(TRACE, format, v...)
-}
-
-func (l *FileLogger) Print(lvl int, v ...interface{}) {
- l.output(&Message{lvl, fmt.Sprint(v...), time.Now()})
-}
-
-func (l *FileLogger) Printf(lvl int, format string, v ...interface{}) {
- l.output(&Message{lvl, fmt.Sprintf(format, v...), time.Now()})
-}
-
-func (l *FileLogger) GetLevel() int {
- return l.outLevel
-}
-
-func (l *FileLogger) IsFatal() bool {
- return l.outLevel <= FATAL
-}
-
-func (l *FileLogger) IsError() bool {
- return l.outLevel <= ERROR
-}
-
-func (l *FileLogger) IsWarn() bool {
- return l.outLevel <= WARN
-}
-
-func (l *FileLogger) IsInfo() bool {
- return l.outLevel <= INFO
-}
-
-func (l *FileLogger) IsDebug() bool {
- return l.outLevel <= DEBUG
-}
-
-func (l *FileLogger) IsTrace() bool {
- return l.outLevel <= TRACE
-}
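For reference, a small self-contained sketch (not part of this change) of the rotation-suffix scheme the deleted doRotate implements: the zero-pad width is the number of digits in the rotation limit, so backup names sort lexicographically.

    package main

    import "fmt"

    func main() {
    	// Same derivation as the deleted doRotate: pad width = digits in the limit.
    	for _, limit := range []int{5, 15, 150} {
    		numFmt := fmt.Sprintf(".%%0%dd", len(fmt.Sprintf("%d", limit)))
    		fmt.Printf("limit=%d format=%q first backup=%q\n",
    			limit, numFmt, fmt.Sprintf("app.log"+numFmt, 1))
    		// limit=15 prints: format=".%02d" first backup="app.log.01"
    	}
    }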
diff --git a/vendor/github.com/jcelliott/lumber/lumber.go b/vendor/github.com/jcelliott/lumber/lumber.go
deleted file mode 100644
index 61795b2..0000000
--- a/vendor/github.com/jcelliott/lumber/lumber.go
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
-Package lumber implements a simple logger that supports log levels and rotation.
-*/
-package lumber
-
-import (
- "strings"
- "time"
-)
-
-const (
- TRACE = iota
- DEBUG
- INFO
- WARN
- ERROR
- FATAL
-
- TIMEFORMAT = "2006-01-02 15:04:05"
-)
-
-var (
- stdLog Logger = NewConsoleLogger(INFO)
- levels = []string{"TRACE", "DEBUG", "INFO ", "WARN ", "ERROR", "FATAL", "*LOG*"}
- timeFormat = TIMEFORMAT
-)
-
-type Logger interface {
- Fatal(string, ...interface{})
- Error(string, ...interface{})
- Warn(string, ...interface{})
- Info(string, ...interface{})
- Debug(string, ...interface{})
- Trace(string, ...interface{})
-
- IsFatal() bool
- IsError() bool
- IsWarn() bool
- IsInfo() bool
- IsDebug() bool
- IsTrace() bool
- GetLevel() int
-
- Print(int, ...interface{})
- Printf(int, string, ...interface{})
- Level(int)
- Prefix(string)
- TimeFormat(string)
- Close()
- output(msg *Message)
-}
-
-type Message struct {
- level int
- m string
- time time.Time
-}
-
-// SetLogger sets a new default logger
-func SetLogger(l Logger) {
- stdLog = l
-}
-
-// Returns the string representation of the level
-func LvlStr(l int) string {
- if l >= 0 && l <= len(levels)-1 {
- return levels[l]
- }
- return ""
-}
-
-// Returns the int value of the level
-func LvlInt(s string) int {
- for i, str := range levels {
- if strings.TrimSpace(str) == strings.ToUpper(s) {
- return i
- }
- }
- return 0
-}
-
-// Sets the output level for the default logger
-func Level(o int) {
- stdLog.Level(o)
-}
-
-// Sets the time format for the default logger
-func TimeFormat(f string) {
- stdLog.TimeFormat(f)
-}
-
-// Close the default logger
-func Close() {
- stdLog.Close()
-}
-
-// Prefix sets a prefix for the default logger
-func Prefix(p string) {
- stdLog.Prefix(p)
-}
-
-// Logging functions
-func Fatal(format string, v ...interface{}) {
- stdLog.Fatal(format, v...)
-}
-
-func Error(format string, v ...interface{}) {
- stdLog.Error(format, v...)
-}
-
-func Warn(format string, v ...interface{}) {
- stdLog.Warn(format, v...)
-}
-
-func Info(format string, v ...interface{}) {
- stdLog.Info(format, v...)
-}
-
-func Debug(format string, v ...interface{}) {
- stdLog.Debug(format, v...)
-}
-
-func Trace(format string, v ...interface{}) {
- stdLog.Trace(format, v...)
-}
-
-func Print(lvl int, v ...interface{}) {
- stdLog.Print(lvl, v...)
-}
-
-func Printf(lvl int, format string, v ...interface{}) {
- stdLog.Printf(lvl, format, v...)
-}
-
-func GetLevel() int {
- return stdLog.GetLevel()
-}
-
-func IsFatal() bool {
- return stdLog.IsFatal()
-}
-
-func IsError() bool {
- return stdLog.IsError()
-}
-
-func IsWarn() bool {
- return stdLog.IsWarn()
-}
-
-func IsInfo() bool {
- return stdLog.IsInfo()
-}
-
-func IsDebug() bool {
- return stdLog.IsDebug()
-}
-
-func IsTrace() bool {
- return stdLog.IsTrace()
-}
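The file above removes lumber's package-level facade: a default console logger behind free functions. A minimal usage sketch, assuming the module is still fetched from its upstream import path rather than vendored:

    package main

    import "github.com/jcelliott/lumber"

    func main() {
    	lumber.Level(lumber.DEBUG) // make the default console logger more verbose
    	lumber.Prefix("[build]")   // tag every line
    	lumber.Debug("current level is %s", lumber.LvlStr(lumber.GetLevel()))
    	lumber.Error("something failed: %v", "disk full")
    }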
diff --git a/vendor/github.com/jcelliott/lumber/multilog.go b/vendor/github.com/jcelliott/lumber/multilog.go
deleted file mode 100644
index c39e877..0000000
--- a/vendor/github.com/jcelliott/lumber/multilog.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package lumber
-
-import (
- "fmt"
- "time"
-)
-
-type MultiLogger struct {
- loggers []Logger
-}
-
-func NewMultiLogger() (l *MultiLogger) {
- return &MultiLogger{}
-}
-
-func (p *MultiLogger) AddLoggers(newLogs ...Logger) {
- for _, l := range newLogs {
- p.loggers = append(p.loggers, l)
- }
-}
-
-func (p *MultiLogger) ClearLoggers() {
- p.loggers = make([]Logger, 0)
-}
-
-// All of these implement the Logger interface and distribute calls to it over
-// all of the member Logger objects.
-func (p *MultiLogger) Fatal(s string, v ...interface{}) {
- for _, logger := range p.loggers {
- logger.Fatal(s, v...)
- }
-}
-
-func (p *MultiLogger) Error(s string, v ...interface{}) {
- for _, logger := range p.loggers {
- logger.Error(s, v...)
- }
-}
-
-func (p *MultiLogger) Warn(s string, v ...interface{}) {
- for _, logger := range p.loggers {
- logger.Warn(s, v...)
- }
-}
-
-func (p *MultiLogger) Info(s string, v ...interface{}) {
- for _, logger := range p.loggers {
- logger.Info(s, v...)
- }
-}
-
-func (p *MultiLogger) Debug(s string, v ...interface{}) {
- for _, logger := range p.loggers {
- logger.Debug(s, v...)
- }
-}
-
-func (p *MultiLogger) Trace(s string, v ...interface{}) {
- for _, logger := range p.loggers {
- logger.Trace(s, v...)
- }
-}
-
-func (p *MultiLogger) Level(i int) {
- for _, logger := range p.loggers {
- logger.Level(i)
- }
-}
-
-func (p *MultiLogger) Prefix(s string) {
- for _, logger := range p.loggers {
- logger.Prefix(s)
- }
-}
-
-func (p *MultiLogger) TimeFormat(s string) {
- for _, logger := range p.loggers {
- logger.TimeFormat(s)
- }
-}
-
-func (p *MultiLogger) Close() {
- for _, logger := range p.loggers {
- logger.Close()
- }
-}
-
-func (p *MultiLogger) output(m *Message) {
- for _, logger := range p.loggers {
- logger.output(m)
- }
-}
-
-func (p *MultiLogger) Print(lvl int, v ...interface{}) {
- p.output(&Message{lvl, fmt.Sprint(v...), time.Now()})
-}
-
-func (p *MultiLogger) Printf(lvl int, format string, v ...interface{}) {
- p.output(&Message{lvl, fmt.Sprintf(format, v...), time.Now()})
-}
-
-func (p *MultiLogger) GetLevel() int {
- level := FATAL
- for _, logger := range p.loggers {
- if logger.GetLevel() <= level {
- level = logger.GetLevel()
- }
- }
- return level
-}
-
-func (p *MultiLogger) IsFatal() bool {
- return p.GetLevel() <= FATAL
-}
-
-func (p *MultiLogger) IsError() bool {
- return p.GetLevel() <= ERROR
-}
-
-func (p *MultiLogger) IsWarn() bool {
- return p.GetLevel() <= WARN
-}
-
-func (p *MultiLogger) IsInfo() bool {
- return p.GetLevel() <= INFO
-}
-
-func (p *MultiLogger) IsDebug() bool {
- return p.GetLevel() <= DEBUG
-}
-
-func (p *MultiLogger) IsTrace() bool {
- return p.GetLevel() <= TRACE
-}
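A sketch of the fan-out behaviour the deleted MultiLogger provides: each call is forwarded to every attached logger, and GetLevel reports the most verbose member. NewConsoleLogger is assumed to be exported by the module, as referenced by the deleted lumber.go above.

    package main

    import "github.com/jcelliott/lumber"

    func main() {
    	multi := lumber.NewMultiLogger()
    	multi.AddLoggers(
    		lumber.NewConsoleLogger(lumber.INFO),
    		lumber.NewConsoleLogger(lumber.TRACE),
    	)
    	multi.Info("forwarded to both loggers")
    	multi.Trace("only the TRACE-level logger prints this")
    	_ = multi.GetLevel() // TRACE: the minimum (most verbose) level across members
    }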
diff --git a/vendor/github.com/opencontainers/runc/LICENSE b/vendor/github.com/opencontainers/runc/LICENSE
deleted file mode 100644
index 2744858..0000000
--- a/vendor/github.com/opencontainers/runc/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright 2014 Docker, Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/opencontainers/runc/NOTICE b/vendor/github.com/opencontainers/runc/NOTICE
deleted file mode 100644
index 5c97abc..0000000
--- a/vendor/github.com/opencontainers/runc/NOTICE
+++ /dev/null
@@ -1,17 +0,0 @@
-runc
-
-Copyright 2012-2015 Docker, Inc.
-
-This product includes software developed at Docker, Inc. (http://www.docker.com).
-
-The following is courtesy of our legal counsel:
-
-
-Use and transfer of Docker may be subject to certain restrictions by the
-United States and other governments.
-It is your responsibility to ensure that your use and/or transfer does not
-violate applicable laws.
-
-For more information, please see http://www.bis.doc.gov
-
-See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS b/vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS
deleted file mode 100644
index edbe200..0000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS
+++ /dev/null
@@ -1,2 +0,0 @@
-Tianon Gravi (@tianon)
-Aleksa Sarai (@cyphar)
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go
deleted file mode 100644
index ab1439f..0000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package user
-
-import (
- "errors"
- "syscall"
-)
-
-var (
- // The current operating system does not provide the required data for user lookups.
- ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data")
- // No matching entries found in file.
- ErrNoPasswdEntries = errors.New("no matching entries in passwd file")
- ErrNoGroupEntries = errors.New("no matching entries in group file")
-)
-
-func lookupUser(filter func(u User) bool) (User, error) {
- // Get operating system-specific passwd reader-closer.
- passwd, err := GetPasswd()
- if err != nil {
- return User{}, err
- }
- defer passwd.Close()
-
- // Get the users.
- users, err := ParsePasswdFilter(passwd, filter)
- if err != nil {
- return User{}, err
- }
-
- // No user entries found.
- if len(users) == 0 {
- return User{}, ErrNoPasswdEntries
- }
-
- // Assume the first entry is the "correct" one.
- return users[0], nil
-}
-
-// CurrentUser looks up the current user by their user id in /etc/passwd. If the
-// user cannot be found (or there is no /etc/passwd file on the filesystem),
-// then CurrentUser returns an error.
-func CurrentUser() (User, error) {
- return LookupUid(syscall.Getuid())
-}
-
-// LookupUser looks up a user by their username in /etc/passwd. If the user
-// cannot be found (or there is no /etc/passwd file on the filesystem), then
-// LookupUser returns an error.
-func LookupUser(username string) (User, error) {
- return lookupUser(func(u User) bool {
- return u.Name == username
- })
-}
-
-// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
-// be found (or there is no /etc/passwd file on the filesystem), then LookupId
-// returns an error.
-func LookupUid(uid int) (User, error) {
- return lookupUser(func(u User) bool {
- return u.Uid == uid
- })
-}
-
-func lookupGroup(filter func(g Group) bool) (Group, error) {
- // Get operating system-specific group reader-closer.
- group, err := GetGroup()
- if err != nil {
- return Group{}, err
- }
- defer group.Close()
-
- // Get the users.
- groups, err := ParseGroupFilter(group, filter)
- if err != nil {
- return Group{}, err
- }
-
- // No user entries found.
- if len(groups) == 0 {
- return Group{}, ErrNoGroupEntries
- }
-
- // Assume the first entry is the "correct" one.
- return groups[0], nil
-}
-
-// CurrentGroup looks up the current user's group by their primary group id's
-// entry in /etc/passwd. If the group cannot be found (or there is no
-// /etc/group file on the filesystem), then CurrentGroup returns an error.
-func CurrentGroup() (Group, error) {
- return LookupGid(syscall.Getgid())
-}
-
-// LookupGroup looks up a group by its name in /etc/group. If the group cannot
-// be found (or there is no /etc/group file on the filesystem), then LookupGroup
-// returns an error.
-func LookupGroup(groupname string) (Group, error) {
- return lookupGroup(func(g Group) bool {
- return g.Name == groupname
- })
-}
-
-// LookupGid looks up a group by its group id in /etc/group. If the group cannot
-// be found (or there is no /etc/group file on the filesystem), then LookupGid
-// returns an error.
-func LookupGid(gid int) (Group, error) {
- return lookupGroup(func(g Group) bool {
- return g.Gid == gid
- })
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
deleted file mode 100644
index 758b734..0000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
-
-package user
-
-import (
- "io"
- "os"
-)
-
-// Unix-specific path to the passwd and group formatted files.
-const (
- unixPasswdPath = "/etc/passwd"
- unixGroupPath = "/etc/group"
-)
-
-func GetPasswdPath() (string, error) {
- return unixPasswdPath, nil
-}
-
-func GetPasswd() (io.ReadCloser, error) {
- return os.Open(unixPasswdPath)
-}
-
-func GetGroupPath() (string, error) {
- return unixGroupPath, nil
-}
-
-func GetGroup() (io.ReadCloser, error) {
- return os.Open(unixGroupPath)
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go
deleted file mode 100644
index 7217948..0000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
-
-package user
-
-import "io"
-
-func GetPasswdPath() (string, error) {
- return "", ErrUnsupported
-}
-
-func GetPasswd() (io.ReadCloser, error) {
- return nil, ErrUnsupported
-}
-
-func GetGroupPath() (string, error) {
- return "", ErrUnsupported
-}
-
-func GetGroup() (io.ReadCloser, error) {
- return nil, ErrUnsupported
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go
deleted file mode 100644
index 43fd39e..0000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go
+++ /dev/null
@@ -1,441 +0,0 @@
-package user
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
-)
-
-const (
- minId = 0
- maxId = 1<<31 - 1 // for compatibility with 32-bit systems
-)
-
-var (
- ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minId, maxId)
-)
-
-type User struct {
- Name string
- Pass string
- Uid int
- Gid int
- Gecos string
- Home string
- Shell string
-}
-
-type Group struct {
- Name string
- Pass string
- Gid int
- List []string
-}
-
-func parseLine(line string, v ...interface{}) {
- if line == "" {
- return
- }
-
- parts := strings.Split(line, ":")
- for i, p := range parts {
- // Ignore cases where we don't have enough fields to populate the arguments.
- // Some configuration files like to misbehave.
- if len(v) <= i {
- break
- }
-
- // Use the type of the argument to figure out how to parse it, scanf() style.
- // This is legit.
- switch e := v[i].(type) {
- case *string:
- *e = p
- case *int:
- // "numbers", with conversion errors ignored because of some misbehaving configuration files.
- *e, _ = strconv.Atoi(p)
- case *[]string:
- // Comma-separated lists.
- if p != "" {
- *e = strings.Split(p, ",")
- } else {
- *e = []string{}
- }
- default:
- // Someone goof'd when writing code using this function. Scream so they can hear us.
- panic(fmt.Sprintf("parseLine only accepts {*string, *int, *[]string} as arguments! %#v is not a pointer!", e))
- }
- }
-}
-
-func ParsePasswdFile(path string) ([]User, error) {
- passwd, err := os.Open(path)
- if err != nil {
- return nil, err
- }
- defer passwd.Close()
- return ParsePasswd(passwd)
-}
-
-func ParsePasswd(passwd io.Reader) ([]User, error) {
- return ParsePasswdFilter(passwd, nil)
-}
-
-func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) {
- passwd, err := os.Open(path)
- if err != nil {
- return nil, err
- }
- defer passwd.Close()
- return ParsePasswdFilter(passwd, filter)
-}
-
-func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
- if r == nil {
- return nil, fmt.Errorf("nil source for passwd-formatted data")
- }
-
- var (
- s = bufio.NewScanner(r)
- out = []User{}
- )
-
- for s.Scan() {
- if err := s.Err(); err != nil {
- return nil, err
- }
-
- line := strings.TrimSpace(s.Text())
- if line == "" {
- continue
- }
-
- // see: man 5 passwd
- // name:password:UID:GID:GECOS:directory:shell
- // Name:Pass:Uid:Gid:Gecos:Home:Shell
- // root:x:0:0:root:/root:/bin/bash
- // adm:x:3:4:adm:/var/adm:/bin/false
- p := User{}
- parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell)
-
- if filter == nil || filter(p) {
- out = append(out, p)
- }
- }
-
- return out, nil
-}
-
-func ParseGroupFile(path string) ([]Group, error) {
- group, err := os.Open(path)
- if err != nil {
- return nil, err
- }
-
- defer group.Close()
- return ParseGroup(group)
-}
-
-func ParseGroup(group io.Reader) ([]Group, error) {
- return ParseGroupFilter(group, nil)
-}
-
-func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) {
- group, err := os.Open(path)
- if err != nil {
- return nil, err
- }
- defer group.Close()
- return ParseGroupFilter(group, filter)
-}
-
-func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
- if r == nil {
- return nil, fmt.Errorf("nil source for group-formatted data")
- }
-
- var (
- s = bufio.NewScanner(r)
- out = []Group{}
- )
-
- for s.Scan() {
- if err := s.Err(); err != nil {
- return nil, err
- }
-
- text := s.Text()
- if text == "" {
- continue
- }
-
- // see: man 5 group
- // group_name:password:GID:user_list
- // Name:Pass:Gid:List
- // root:x:0:root
- // adm:x:4:root,adm,daemon
- p := Group{}
- parseLine(text, &p.Name, &p.Pass, &p.Gid, &p.List)
-
- if filter == nil || filter(p) {
- out = append(out, p)
- }
- }
-
- return out, nil
-}
-
-type ExecUser struct {
- Uid int
- Gid int
- Sgids []int
- Home string
-}
-
-// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the
-// given file paths and uses that data as the arguments to GetExecUser. If the
-// files cannot be opened for any reason, the error is ignored and a nil
-// io.Reader is passed instead.
-func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) {
- passwd, err := os.Open(passwdPath)
- if err != nil {
- passwd = nil
- } else {
- defer passwd.Close()
- }
-
- group, err := os.Open(groupPath)
- if err != nil {
- group = nil
- } else {
- defer group.Close()
- }
-
- return GetExecUser(userSpec, defaults, passwd, group)
-}
-
-// GetExecUser parses a user specification string (using the passwd and group
-// readers as sources for /etc/passwd and /etc/group data, respectively). In
-// the case of blank fields or missing data from the sources, the values in
-// defaults are used.
-//
-// GetExecUser will return an error if a user or group literal could not be
-// found in any entry in passwd and group respectively.
-//
-// Examples of valid user specifications are:
-// * ""
-// * "user"
-// * "uid"
-// * "user:group"
-// * "uid:gid
-// * "user:gid"
-// * "uid:group"
-//
-// It should be noted that if you specify a numeric user or group id, they will
-// not be evaluated as usernames (only the metadata will be filled). So attempting
-// to parse a user with user.Name = "1337" will produce the user with a UID of
-// 1337.
-func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) {
- if defaults == nil {
- defaults = new(ExecUser)
- }
-
- // Copy over defaults.
- user := &ExecUser{
- Uid: defaults.Uid,
- Gid: defaults.Gid,
- Sgids: defaults.Sgids,
- Home: defaults.Home,
- }
-
- // Sgids slice *cannot* be nil.
- if user.Sgids == nil {
- user.Sgids = []int{}
- }
-
- // Allow for userArg to have either "user" syntax, or optionally "user:group" syntax
- var userArg, groupArg string
- parseLine(userSpec, &userArg, &groupArg)
-
- // Convert userArg and groupArg to be numeric, so we don't have to execute
- // Atoi *twice* for each iteration over lines.
- uidArg, uidErr := strconv.Atoi(userArg)
- gidArg, gidErr := strconv.Atoi(groupArg)
-
- // Find the matching user.
- users, err := ParsePasswdFilter(passwd, func(u User) bool {
- if userArg == "" {
- // Default to current state of the user.
- return u.Uid == user.Uid
- }
-
- if uidErr == nil {
- // If the userArg is numeric, always treat it as a UID.
- return uidArg == u.Uid
- }
-
- return u.Name == userArg
- })
-
- // If we can't find the user, we have to bail.
- if err != nil && passwd != nil {
- if userArg == "" {
- userArg = strconv.Itoa(user.Uid)
- }
- return nil, fmt.Errorf("unable to find user %s: %v", userArg, err)
- }
-
- var matchedUserName string
- if len(users) > 0 {
- // First match wins, even if there's more than one matching entry.
- matchedUserName = users[0].Name
- user.Uid = users[0].Uid
- user.Gid = users[0].Gid
- user.Home = users[0].Home
- } else if userArg != "" {
- // If we can't find a user with the given username, the only other valid
- // option is if it's a numeric username with no associated entry in passwd.
-
- if uidErr != nil {
- // Not numeric.
- return nil, fmt.Errorf("unable to find user %s: %v", userArg, ErrNoPasswdEntries)
- }
- user.Uid = uidArg
-
- // Must be inside valid uid range.
- if user.Uid < minId || user.Uid > maxId {
- return nil, ErrRange
- }
-
- // Okay, so it's numeric. We can just roll with this.
- }
-
- // On to the groups. If we matched a username, we need to do this because of
- // the supplementary group IDs.
- if groupArg != "" || matchedUserName != "" {
- groups, err := ParseGroupFilter(group, func(g Group) bool {
- // If the group argument isn't explicit, we'll just search for it.
- if groupArg == "" {
- // Check if user is a member of this group.
- for _, u := range g.List {
- if u == matchedUserName {
- return true
- }
- }
- return false
- }
-
- if gidErr == nil {
- // If the groupArg is numeric, always treat it as a GID.
- return gidArg == g.Gid
- }
-
- return g.Name == groupArg
- })
- if err != nil && group != nil {
- return nil, fmt.Errorf("unable to find groups for spec %v: %v", matchedUserName, err)
- }
-
- // Only start modifying user.Gid if it is in explicit form.
- if groupArg != "" {
- if len(groups) > 0 {
- // First match wins, even if there's more than one matching entry.
- user.Gid = groups[0].Gid
- } else if groupArg != "" {
- // If we can't find a group with the given name, the only other valid
- // option is if it's a numeric group name with no associated entry in group.
-
- if gidErr != nil {
- // Not numeric.
- return nil, fmt.Errorf("unable to find group %s: %v", groupArg, ErrNoGroupEntries)
- }
- user.Gid = gidArg
-
- // Must be inside valid gid range.
- if user.Gid < minId || user.Gid > maxId {
- return nil, ErrRange
- }
-
- // Okay, so it's numeric. We can just roll with this.
- }
- } else if len(groups) > 0 {
- // Supplementary group ids only make sense if in the implicit form.
- user.Sgids = make([]int, len(groups))
- for i, group := range groups {
- user.Sgids[i] = group.Gid
- }
- }
- }
-
- return user, nil
-}
-
-// GetAdditionalGroups looks up a list of groups by name or group id
-// against the given /etc/group formatted data. If a group name cannot
-// be found, an error will be returned. If a group id cannot be found,
-// or the given group data is nil, the id will be returned as-is
-// provided it is in the legal range.
-func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) {
- var groups = []Group{}
- if group != nil {
- var err error
- groups, err = ParseGroupFilter(group, func(g Group) bool {
- for _, ag := range additionalGroups {
- if g.Name == ag || strconv.Itoa(g.Gid) == ag {
- return true
- }
- }
- return false
- })
- if err != nil {
- return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err)
- }
- }
-
- gidMap := make(map[int]struct{})
- for _, ag := range additionalGroups {
- var found bool
- for _, g := range groups {
- // if we found a matched group either by name or gid, take the
- // first matched as correct
- if g.Name == ag || strconv.Itoa(g.Gid) == ag {
- if _, ok := gidMap[g.Gid]; !ok {
- gidMap[g.Gid] = struct{}{}
- found = true
- break
- }
- }
- }
- // we asked for a group but didn't find it. let's check to see
- // if we wanted a numeric group
- if !found {
- gid, err := strconv.Atoi(ag)
- if err != nil {
- return nil, fmt.Errorf("Unable to find group %s", ag)
- }
- // Ensure gid is inside gid range.
- if gid < minId || gid > maxId {
- return nil, ErrRange
- }
- gidMap[gid] = struct{}{}
- }
- }
- gids := []int{}
- for gid := range gidMap {
- gids = append(gids, gid)
- }
- return gids, nil
-}
-
-// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups
-// that opens the groupPath given and gives it as an argument to
-// GetAdditionalGroups.
-func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) {
- group, err := os.Open(groupPath)
- if err == nil {
- defer group.Close()
- }
- return GetAdditionalGroups(additionalGroups, group)
-}
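The user package deleted above resolves "user:group" specs against passwd/group-formatted data. A sketch of GetExecUser fed from in-memory readers instead of /etc files, assuming the package is now pulled in as a regular module dependency:

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/opencontainers/runc/libcontainer/user"
    )

    func main() {
    	passwd := strings.NewReader("root:x:0:0:root:/root:/bin/bash\nwww:x:33:33::/var/www:/bin/false\n")
    	group := strings.NewReader("root:x:0:root\nwww-data:x:33:www\n")

    	// Resolve a "user:group" spec the same way the deleted code does.
    	execUser, err := user.GetExecUser("www:www-data", nil, passwd, group)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("uid=%d gid=%d home=%s\n", execUser.Uid, execUser.Gid, execUser.Home)
    	// uid=33 gid=33 home=/var/www
    }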
diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE
deleted file mode 100644
index 6a66aea..0000000
--- a/vendor/golang.org/x/net/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS
deleted file mode 100644
index 7330990..0000000
--- a/vendor/golang.org/x/net/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
deleted file mode 100644
index 134654c..0000000
--- a/vendor/golang.org/x/net/context/context.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package context defines the Context type, which carries deadlines,
-// cancelation signals, and other request-scoped values across API boundaries
-// and between processes.
-//
-// Incoming requests to a server should create a Context, and outgoing calls to
-// servers should accept a Context. The chain of function calls between must
-// propagate the Context, optionally replacing it with a modified copy created
-// using WithDeadline, WithTimeout, WithCancel, or WithValue.
-//
-// Programs that use Contexts should follow these rules to keep interfaces
-// consistent across packages and enable static analysis tools to check context
-// propagation:
-//
-// Do not store Contexts inside a struct type; instead, pass a Context
-// explicitly to each function that needs it. The Context should be the first
-// parameter, typically named ctx:
-//
-// func DoSomething(ctx context.Context, arg Arg) error {
-// // ... use ctx ...
-// }
-//
-// Do not pass a nil Context, even if a function permits it. Pass context.TODO
-// if you are unsure about which Context to use.
-//
-// Use context Values only for request-scoped data that transits processes and
-// APIs, not for passing optional parameters to functions.
-//
-// The same Context may be passed to functions running in different goroutines;
-// Contexts are safe for simultaneous use by multiple goroutines.
-//
-// See http://blog.golang.org/context for example code for a server that uses
-// Contexts.
-package context // import "golang.org/x/net/context"
-
-import "time"
-
-// A Context carries a deadline, a cancelation signal, and other values across
-// API boundaries.
-//
-// Context's methods may be called by multiple goroutines simultaneously.
-type Context interface {
- // Deadline returns the time when work done on behalf of this context
- // should be canceled. Deadline returns ok==false when no deadline is
- // set. Successive calls to Deadline return the same results.
- Deadline() (deadline time.Time, ok bool)
-
- // Done returns a channel that's closed when work done on behalf of this
- // context should be canceled. Done may return nil if this context can
- // never be canceled. Successive calls to Done return the same value.
- //
- // WithCancel arranges for Done to be closed when cancel is called;
- // WithDeadline arranges for Done to be closed when the deadline
- // expires; WithTimeout arranges for Done to be closed when the timeout
- // elapses.
- //
- // Done is provided for use in select statements:
- //
- // // Stream generates values with DoSomething and sends them to out
- // // until DoSomething returns an error or ctx.Done is closed.
- // func Stream(ctx context.Context, out chan<- Value) error {
- // for {
- // v, err := DoSomething(ctx)
- // if err != nil {
- // return err
- // }
- // select {
- // case <-ctx.Done():
- // return ctx.Err()
- // case out <- v:
- // }
- // }
- // }
- //
- // See http://blog.golang.org/pipelines for more examples of how to use
- // a Done channel for cancelation.
- Done() <-chan struct{}
-
- // Err returns a non-nil error value after Done is closed. Err returns
- // Canceled if the context was canceled or DeadlineExceeded if the
- // context's deadline passed. No other values for Err are defined.
- // After Done is closed, successive calls to Err return the same value.
- Err() error
-
- // Value returns the value associated with this context for key, or nil
- // if no value is associated with key. Successive calls to Value with
- // the same key returns the same result.
- //
- // Use context values only for request-scoped data that transits
- // processes and API boundaries, not for passing optional parameters to
- // functions.
- //
- // A key identifies a specific value in a Context. Functions that wish
- // to store values in Context typically allocate a key in a global
- // variable then use that key as the argument to context.WithValue and
- // Context.Value. A key can be any type that supports equality;
- // packages should define keys as an unexported type to avoid
- // collisions.
- //
- // Packages that define a Context key should provide type-safe accessors
- // for the values stored using that key:
- //
- // // Package user defines a User type that's stored in Contexts.
- // package user
- //
- // import "golang.org/x/net/context"
- //
- // // User is the type of value stored in the Contexts.
- // type User struct {...}
- //
- // // key is an unexported type for keys defined in this package.
- // // This prevents collisions with keys defined in other packages.
- // type key int
- //
- // // userKey is the key for user.User values in Contexts. It is
- // // unexported; clients use user.NewContext and user.FromContext
- // // instead of using this key directly.
- // var userKey key = 0
- //
- // // NewContext returns a new Context that carries value u.
- // func NewContext(ctx context.Context, u *User) context.Context {
- // return context.WithValue(ctx, userKey, u)
- // }
- //
- // // FromContext returns the User value stored in ctx, if any.
- // func FromContext(ctx context.Context) (*User, bool) {
- // u, ok := ctx.Value(userKey).(*User)
- // return u, ok
- // }
- Value(key interface{}) interface{}
-}
-
-// Background returns a non-nil, empty Context. It is never canceled, has no
-// values, and has no deadline. It is typically used by the main function,
-// initialization, and tests, and as the top-level Context for incoming
-// requests.
-func Background() Context {
- return background
-}
-
-// TODO returns a non-nil, empty Context. Code should use context.TODO when
-// it's unclear which Context to use or it is not yet available (because the
-// surrounding function has not yet been extended to accept a Context
-// parameter). TODO is recognized by static analysis tools that determine
-// whether Contexts are propagated correctly in a program.
-func TODO() Context {
- return todo
-}
-
-// A CancelFunc tells an operation to abandon its work.
-// A CancelFunc does not wait for the work to stop.
-// After the first call, subsequent calls to a CancelFunc do nothing.
-type CancelFunc func()
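On Go 1.7 and later, the golang.org/x/net/context package deleted here is only a thin alias over the standard library (see the go17.go shim below), so callers can import the stdlib context package directly. A minimal cancellation sketch:

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    	defer cancel() // always release the timer

    	select {
    	case <-time.After(time.Second): // stand-in for real work
    		fmt.Println("work finished")
    	case <-ctx.Done():
    		fmt.Println("gave up:", ctx.Err()) // context deadline exceeded
    	}
    }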
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go
deleted file mode 100644
index f8cda19..0000000
--- a/vendor/golang.org/x/net/context/go17.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.7
-
-package context
-
-import (
- "context" // standard library's context, as of Go 1.7
- "time"
-)
-
-var (
- todo = context.TODO()
- background = context.Background()
-)
-
-// Canceled is the error returned by Context.Err when the context is canceled.
-var Canceled = context.Canceled
-
-// DeadlineExceeded is the error returned by Context.Err when the context's
-// deadline passes.
-var DeadlineExceeded = context.DeadlineExceeded
-
-// WithCancel returns a copy of parent with a new Done channel. The returned
-// context's Done channel is closed when the returned cancel function is called
-// or when the parent context's Done channel is closed, whichever happens first.
-//
-// Canceling this context releases resources associated with it, so code should
-// call cancel as soon as the operations running in this Context complete.
-func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
- ctx, f := context.WithCancel(parent)
- return ctx, CancelFunc(f)
-}
-
-// WithDeadline returns a copy of the parent context with the deadline adjusted
-// to be no later than d. If the parent's deadline is already earlier than d,
-// WithDeadline(parent, d) is semantically equivalent to parent. The returned
-// context's Done channel is closed when the deadline expires, when the returned
-// cancel function is called, or when the parent context's Done channel is
-// closed, whichever happens first.
-//
-// Canceling this context releases resources associated with it, so code should
-// call cancel as soon as the operations running in this Context complete.
-func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
- ctx, f := context.WithDeadline(parent, deadline)
- return ctx, CancelFunc(f)
-}
-
-// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
-//
-// Canceling this context releases resources associated with it, so code should
-// call cancel as soon as the operations running in this Context complete:
-//
-// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
-// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
-// defer cancel() // releases resources if slowOperation completes before timeout elapses
-// return slowOperation(ctx)
-// }
-func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
- return WithDeadline(parent, time.Now().Add(timeout))
-}
-
-// WithValue returns a copy of parent in which the value associated with key is
-// val.
-//
-// Use context Values only for request-scoped data that transits processes and
-// APIs, not for passing optional parameters to functions.
-func WithValue(parent Context, key interface{}, val interface{}) Context {
- return context.WithValue(parent, key, val)
-}
diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go
deleted file mode 100644
index 5a30aca..0000000
--- a/vendor/golang.org/x/net/context/pre_go17.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.7
-
-package context
-
-import (
- "errors"
- "fmt"
- "sync"
- "time"
-)
-
-// An emptyCtx is never canceled, has no values, and has no deadline. It is not
-// struct{}, since vars of this type must have distinct addresses.
-type emptyCtx int
-
-func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
- return
-}
-
-func (*emptyCtx) Done() <-chan struct{} {
- return nil
-}
-
-func (*emptyCtx) Err() error {
- return nil
-}
-
-func (*emptyCtx) Value(key interface{}) interface{} {
- return nil
-}
-
-func (e *emptyCtx) String() string {
- switch e {
- case background:
- return "context.Background"
- case todo:
- return "context.TODO"
- }
- return "unknown empty Context"
-}
-
-var (
- background = new(emptyCtx)
- todo = new(emptyCtx)
-)
-
-// Canceled is the error returned by Context.Err when the context is canceled.
-var Canceled = errors.New("context canceled")
-
-// DeadlineExceeded is the error returned by Context.Err when the context's
-// deadline passes.
-var DeadlineExceeded = errors.New("context deadline exceeded")
-
-// WithCancel returns a copy of parent with a new Done channel. The returned
-// context's Done channel is closed when the returned cancel function is called
-// or when the parent context's Done channel is closed, whichever happens first.
-//
-// Canceling this context releases resources associated with it, so code should
-// call cancel as soon as the operations running in this Context complete.
-func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
- c := newCancelCtx(parent)
- propagateCancel(parent, c)
- return c, func() { c.cancel(true, Canceled) }
-}
-
-// newCancelCtx returns an initialized cancelCtx.
-func newCancelCtx(parent Context) *cancelCtx {
- return &cancelCtx{
- Context: parent,
- done: make(chan struct{}),
- }
-}
-
-// propagateCancel arranges for child to be canceled when parent is.
-func propagateCancel(parent Context, child canceler) {
- if parent.Done() == nil {
- return // parent is never canceled
- }
- if p, ok := parentCancelCtx(parent); ok {
- p.mu.Lock()
- if p.err != nil {
- // parent has already been canceled
- child.cancel(false, p.err)
- } else {
- if p.children == nil {
- p.children = make(map[canceler]bool)
- }
- p.children[child] = true
- }
- p.mu.Unlock()
- } else {
- go func() {
- select {
- case <-parent.Done():
- child.cancel(false, parent.Err())
- case <-child.Done():
- }
- }()
- }
-}
-
-// parentCancelCtx follows a chain of parent references until it finds a
-// *cancelCtx. This function understands how each of the concrete types in this
-// package represents its parent.
-func parentCancelCtx(parent Context) (*cancelCtx, bool) {
- for {
- switch c := parent.(type) {
- case *cancelCtx:
- return c, true
- case *timerCtx:
- return c.cancelCtx, true
- case *valueCtx:
- parent = c.Context
- default:
- return nil, false
- }
- }
-}
-
-// removeChild removes a context from its parent.
-func removeChild(parent Context, child canceler) {
- p, ok := parentCancelCtx(parent)
- if !ok {
- return
- }
- p.mu.Lock()
- if p.children != nil {
- delete(p.children, child)
- }
- p.mu.Unlock()
-}
-
-// A canceler is a context type that can be canceled directly. The
-// implementations are *cancelCtx and *timerCtx.
-type canceler interface {
- cancel(removeFromParent bool, err error)
- Done() <-chan struct{}
-}
-
-// A cancelCtx can be canceled. When canceled, it also cancels any children
-// that implement canceler.
-type cancelCtx struct {
- Context
-
- done chan struct{} // closed by the first cancel call.
-
- mu sync.Mutex
- children map[canceler]bool // set to nil by the first cancel call
- err error // set to non-nil by the first cancel call
-}
-
-func (c *cancelCtx) Done() <-chan struct{} {
- return c.done
-}
-
-func (c *cancelCtx) Err() error {
- c.mu.Lock()
- defer c.mu.Unlock()
- return c.err
-}
-
-func (c *cancelCtx) String() string {
- return fmt.Sprintf("%v.WithCancel", c.Context)
-}
-
-// cancel closes c.done, cancels each of c's children, and, if
-// removeFromParent is true, removes c from its parent's children.
-func (c *cancelCtx) cancel(removeFromParent bool, err error) {
- if err == nil {
- panic("context: internal error: missing cancel error")
- }
- c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return // already canceled
- }
- c.err = err
- close(c.done)
- for child := range c.children {
- // NOTE: acquiring the child's lock while holding parent's lock.
- child.cancel(false, err)
- }
- c.children = nil
- c.mu.Unlock()
-
- if removeFromParent {
- removeChild(c.Context, c)
- }
-}
-
-// WithDeadline returns a copy of the parent context with the deadline adjusted
-// to be no later than d. If the parent's deadline is already earlier than d,
-// WithDeadline(parent, d) is semantically equivalent to parent. The returned
-// context's Done channel is closed when the deadline expires, when the returned
-// cancel function is called, or when the parent context's Done channel is
-// closed, whichever happens first.
-//
-// Canceling this context releases resources associated with it, so code should
-// call cancel as soon as the operations running in this Context complete.
-func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
- if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
- // The current deadline is already sooner than the new one.
- return WithCancel(parent)
- }
- c := &timerCtx{
- cancelCtx: newCancelCtx(parent),
- deadline: deadline,
- }
- propagateCancel(parent, c)
- d := deadline.Sub(time.Now())
- if d <= 0 {
- c.cancel(true, DeadlineExceeded) // deadline has already passed
- return c, func() { c.cancel(true, Canceled) }
- }
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.err == nil {
- c.timer = time.AfterFunc(d, func() {
- c.cancel(true, DeadlineExceeded)
- })
- }
- return c, func() { c.cancel(true, Canceled) }
-}
-
-// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
-// implement Done and Err. It implements cancel by stopping its timer then
-// delegating to cancelCtx.cancel.
-type timerCtx struct {
- *cancelCtx
- timer *time.Timer // Under cancelCtx.mu.
-
- deadline time.Time
-}
-
-func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
- return c.deadline, true
-}
-
-func (c *timerCtx) String() string {
- return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
-}
-
-func (c *timerCtx) cancel(removeFromParent bool, err error) {
- c.cancelCtx.cancel(false, err)
- if removeFromParent {
- // Remove this timerCtx from its parent cancelCtx's children.
- removeChild(c.cancelCtx.Context, c)
- }
- c.mu.Lock()
- if c.timer != nil {
- c.timer.Stop()
- c.timer = nil
- }
- c.mu.Unlock()
-}
-
-// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
-//
-// Canceling this context releases resources associated with it, so code should
-// call cancel as soon as the operations running in this Context complete:
-//
-// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
-// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
-// defer cancel() // releases resources if slowOperation completes before timeout elapses
-// return slowOperation(ctx)
-// }
-func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
- return WithDeadline(parent, time.Now().Add(timeout))
-}
-
-// WithValue returns a copy of parent in which the value associated with key is
-// val.
-//
-// Use context Values only for request-scoped data that transits processes and
-// APIs, not for passing optional parameters to functions.
-func WithValue(parent Context, key interface{}, val interface{}) Context {
- return &valueCtx{parent, key, val}
-}
-
-// A valueCtx carries a key-value pair. It implements Value for that key and
-// delegates all other calls to the embedded Context.
-type valueCtx struct {
- Context
- key, val interface{}
-}
-
-func (c *valueCtx) String() string {
- return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
-}
-
-func (c *valueCtx) Value(key interface{}) interface{} {
- if c.key == key {
- return c.val
- }
- return c.Context.Value(key)
-}
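
Note on the deletion above: the cancellation code removed here is the standard golang.org/x/net/context implementation, so a minimal sketch of equivalent caller-side usage against the upstream package (the same import path volume.go uses further down) would look like the following; the durations are illustrative only.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	// WithTimeout is WithDeadline(parent, time.Now().Add(timeout));
	// deferring cancel releases the timer if the work finishes early.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	select {
	case <-time.After(time.Second):
		fmt.Println("work finished")
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // context deadline exceeded
	}
}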
diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go
deleted file mode 100644
index 4c5ad88..0000000
--- a/vendor/golang.org/x/net/proxy/direct.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proxy
-
-import (
- "net"
-)
-
-type direct struct{}
-
-// Direct is a direct proxy: one that makes network connections directly.
-var Direct = direct{}
-
-func (direct) Dial(network, addr string) (net.Conn, error) {
- return net.Dial(network, addr)
-}
diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go
deleted file mode 100644
index f540b19..0000000
--- a/vendor/golang.org/x/net/proxy/per_host.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proxy
-
-import (
- "net"
- "strings"
-)
-
-// A PerHost directs connections to a default Dialer unless the hostname
-// requested matches one of a number of exceptions.
-type PerHost struct {
- def, bypass Dialer
-
- bypassNetworks []*net.IPNet
- bypassIPs []net.IP
- bypassZones []string
- bypassHosts []string
-}
-
-// NewPerHost returns a PerHost Dialer that directs connections to either
-// defaultDialer or bypass, depending on whether the connection matches one of
-// the configured rules.
-func NewPerHost(defaultDialer, bypass Dialer) *PerHost {
- return &PerHost{
- def: defaultDialer,
- bypass: bypass,
- }
-}
-
-// Dial connects to the address addr on the given network through either
-// defaultDialer or bypass.
-func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
-
- return p.dialerForRequest(host).Dial(network, addr)
-}
-
-func (p *PerHost) dialerForRequest(host string) Dialer {
- if ip := net.ParseIP(host); ip != nil {
- for _, net := range p.bypassNetworks {
- if net.Contains(ip) {
- return p.bypass
- }
- }
- for _, bypassIP := range p.bypassIPs {
- if bypassIP.Equal(ip) {
- return p.bypass
- }
- }
- return p.def
- }
-
- for _, zone := range p.bypassZones {
- if strings.HasSuffix(host, zone) {
- return p.bypass
- }
- if host == zone[1:] {
- // For a zone "example.com", we match "example.com"
- // too.
- return p.bypass
- }
- }
- for _, bypassHost := range p.bypassHosts {
- if bypassHost == host {
- return p.bypass
- }
- }
- return p.def
-}
-
-// AddFromString parses a string that contains comma-separated values
-// specifying hosts that should use the bypass proxy. Each value is either an
-// IP address, a CIDR range, a zone (*.example.com) or a hostname
-// (localhost). A best effort is made to parse the string and errors are
-// ignored.
-func (p *PerHost) AddFromString(s string) {
- hosts := strings.Split(s, ",")
- for _, host := range hosts {
- host = strings.TrimSpace(host)
- if len(host) == 0 {
- continue
- }
- if strings.Contains(host, "/") {
- // We assume that it's a CIDR address like 127.0.0.0/8
- if _, net, err := net.ParseCIDR(host); err == nil {
- p.AddNetwork(net)
- }
- continue
- }
- if ip := net.ParseIP(host); ip != nil {
- p.AddIP(ip)
- continue
- }
- if strings.HasPrefix(host, "*.") {
- p.AddZone(host[1:])
- continue
- }
- p.AddHost(host)
- }
-}
-
-// AddIP specifies an IP address that will use the bypass proxy. Note that
-// this will only take effect if a literal IP address is dialed. A connection
-// to a named host will never match an IP.
-func (p *PerHost) AddIP(ip net.IP) {
- p.bypassIPs = append(p.bypassIPs, ip)
-}
-
-// AddNetwork specifies an IP range that will use the bypass proxy. Note that
-// this will only take effect if a literal IP address is dialed. A connection
-// to a named host will never match.
-func (p *PerHost) AddNetwork(net *net.IPNet) {
- p.bypassNetworks = append(p.bypassNetworks, net)
-}
-
-// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
-// "example.com" matches "example.com" and all of its subdomains.
-func (p *PerHost) AddZone(zone string) {
- if strings.HasSuffix(zone, ".") {
- zone = zone[:len(zone)-1]
- }
- if !strings.HasPrefix(zone, ".") {
- zone = "." + zone
- }
- p.bypassZones = append(p.bypassZones, zone)
-}
-
-// AddHost specifies a hostname that will use the bypass proxy.
-func (p *PerHost) AddHost(host string) {
- if strings.HasSuffix(host, ".") {
- host = host[:len(host)-1]
- }
- p.bypassHosts = append(p.bypassHosts, host)
-}
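
The PerHost dialer removed above is normally consumed through the upstream golang.org/x/net/proxy package; below is a hedged sketch of typical wiring, using only identifiers defined in the deleted files (the proxy address and bypass list are hypothetical).

package main

import (
	"log"

	"golang.org/x/net/proxy"
)

func main() {
	// Send most traffic through a SOCKS5 proxy, but dial bypass
	// hosts, zones and CIDR ranges directly.
	socks, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}

	perHost := proxy.NewPerHost(socks, proxy.Direct)
	perHost.AddFromString("localhost,10.0.0.0/8,*.internal.example.com")

	conn, err := perHost.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}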
diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go
deleted file mode 100644
index 78a8b7b..0000000
--- a/vendor/golang.org/x/net/proxy/proxy.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package proxy provides support for a variety of protocols to proxy network
-// data.
-package proxy // import "golang.org/x/net/proxy"
-
-import (
- "errors"
- "net"
- "net/url"
- "os"
-)
-
-// A Dialer is a means to establish a connection.
-type Dialer interface {
- // Dial connects to the given address via the proxy.
- Dial(network, addr string) (c net.Conn, err error)
-}
-
-// Auth contains authentication parameters that specific Dialers may require.
-type Auth struct {
- User, Password string
-}
-
-// FromEnvironment returns the dialer specified by the proxy related variables in
-// the environment.
-func FromEnvironment() Dialer {
- allProxy := os.Getenv("all_proxy")
- if len(allProxy) == 0 {
- return Direct
- }
-
- proxyURL, err := url.Parse(allProxy)
- if err != nil {
- return Direct
- }
- proxy, err := FromURL(proxyURL, Direct)
- if err != nil {
- return Direct
- }
-
- noProxy := os.Getenv("no_proxy")
- if len(noProxy) == 0 {
- return proxy
- }
-
- perHost := NewPerHost(proxy, Direct)
- perHost.AddFromString(noProxy)
- return perHost
-}
-
-// proxySchemes is a map from URL schemes to a function that creates a Dialer
-// from a URL with such a scheme.
-var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error)
-
-// RegisterDialerType takes a URL scheme and a function to generate Dialers from
-// a URL with that scheme and a forwarding Dialer. Registered schemes are used
-// by FromURL.
-func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) {
- if proxySchemes == nil {
- proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error))
- }
- proxySchemes[scheme] = f
-}
-
-// FromURL returns a Dialer given a URL specification and an underlying
-// Dialer for it to make network requests.
-func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
- var auth *Auth
- if u.User != nil {
- auth = new(Auth)
- auth.User = u.User.Username()
- if p, ok := u.User.Password(); ok {
- auth.Password = p
- }
- }
-
- switch u.Scheme {
- case "socks5":
- return SOCKS5("tcp", u.Host, auth, forward)
- }
-
- // If the scheme doesn't match any of the built-in schemes, see if it
- // was registered by another package.
- if proxySchemes != nil {
- if f, ok := proxySchemes[u.Scheme]; ok {
- return f(u, forward)
- }
- }
-
- return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
-}
diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go
deleted file mode 100644
index 9b96282..0000000
--- a/vendor/golang.org/x/net/proxy/socks5.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proxy
-
-import (
- "errors"
- "io"
- "net"
- "strconv"
-)
-
-// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
-// with an optional username and password. See RFC 1928.
-func SOCKS5(network, addr string, auth *Auth, forward Dialer) (Dialer, error) {
- s := &socks5{
- network: network,
- addr: addr,
- forward: forward,
- }
- if auth != nil {
- s.user = auth.User
- s.password = auth.Password
- }
-
- return s, nil
-}
-
-type socks5 struct {
- user, password string
- network, addr string
- forward Dialer
-}
-
-const socks5Version = 5
-
-const (
- socks5AuthNone = 0
- socks5AuthPassword = 2
-)
-
-const socks5Connect = 1
-
-const (
- socks5IP4 = 1
- socks5Domain = 3
- socks5IP6 = 4
-)
-
-var socks5Errors = []string{
- "",
- "general failure",
- "connection forbidden",
- "network unreachable",
- "host unreachable",
- "connection refused",
- "TTL expired",
- "command not supported",
- "address type not supported",
-}
-
-// Dial connects to the address addr on the network net via the SOCKS5 proxy.
-func (s *socks5) Dial(network, addr string) (net.Conn, error) {
- switch network {
- case "tcp", "tcp6", "tcp4":
- default:
- return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
- }
-
- conn, err := s.forward.Dial(s.network, s.addr)
- if err != nil {
- return nil, err
- }
- closeConn := &conn
- defer func() {
- if closeConn != nil {
- (*closeConn).Close()
- }
- }()
-
- host, portStr, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
-
- port, err := strconv.Atoi(portStr)
- if err != nil {
- return nil, errors.New("proxy: failed to parse port number: " + portStr)
- }
- if port < 1 || port > 0xffff {
- return nil, errors.New("proxy: port number out of range: " + portStr)
- }
-
- // the size here is just an estimate
- buf := make([]byte, 0, 6+len(host))
-
- buf = append(buf, socks5Version)
- if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
- buf = append(buf, 2 /* num auth methods */, socks5AuthNone, socks5AuthPassword)
- } else {
- buf = append(buf, 1 /* num auth methods */, socks5AuthNone)
- }
-
- if _, err := conn.Write(buf); err != nil {
- return nil, errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- if _, err := io.ReadFull(conn, buf[:2]); err != nil {
- return nil, errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
- if buf[0] != 5 {
- return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
- }
- if buf[1] == 0xff {
- return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
- }
-
- if buf[1] == socks5AuthPassword {
- buf = buf[:0]
- buf = append(buf, 1 /* password protocol version */)
- buf = append(buf, uint8(len(s.user)))
- buf = append(buf, s.user...)
- buf = append(buf, uint8(len(s.password)))
- buf = append(buf, s.password...)
-
- if _, err := conn.Write(buf); err != nil {
- return nil, errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- if _, err := io.ReadFull(conn, buf[:2]); err != nil {
- return nil, errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- if buf[1] != 0 {
- return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
- }
- }
-
- buf = buf[:0]
- buf = append(buf, socks5Version, socks5Connect, 0 /* reserved */)
-
- if ip := net.ParseIP(host); ip != nil {
- if ip4 := ip.To4(); ip4 != nil {
- buf = append(buf, socks5IP4)
- ip = ip4
- } else {
- buf = append(buf, socks5IP6)
- }
- buf = append(buf, ip...)
- } else {
- if len(host) > 255 {
- return nil, errors.New("proxy: destination hostname too long: " + host)
- }
- buf = append(buf, socks5Domain)
- buf = append(buf, byte(len(host)))
- buf = append(buf, host...)
- }
- buf = append(buf, byte(port>>8), byte(port))
-
- if _, err := conn.Write(buf); err != nil {
- return nil, errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- if _, err := io.ReadFull(conn, buf[:4]); err != nil {
- return nil, errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- failure := "unknown error"
- if int(buf[1]) < len(socks5Errors) {
- failure = socks5Errors[buf[1]]
- }
-
- if len(failure) > 0 {
- return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
- }
-
- bytesToDiscard := 0
- switch buf[3] {
- case socks5IP4:
- bytesToDiscard = net.IPv4len
- case socks5IP6:
- bytesToDiscard = net.IPv6len
- case socks5Domain:
- _, err := io.ReadFull(conn, buf[:1])
- if err != nil {
- return nil, errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
- bytesToDiscard = int(buf[0])
- default:
- return nil, errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
- }
-
- if cap(buf) < bytesToDiscard {
- buf = make([]byte, bytesToDiscard)
- } else {
- buf = buf[:bytesToDiscard]
- }
- if _, err := io.ReadFull(conn, buf); err != nil {
- return nil, errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- // Also need to discard the port number
- if _, err := io.ReadFull(conn, buf[:2]); err != nil {
- return nil, errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
- }
-
- closeConn = nil
- return conn, nil
-}
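
With socks5.go gone as well, the whole golang.org/x/net/proxy tree is out of vendor/. A short, hedged sketch of how the deleted FromEnvironment/FromURL path is exercised from the upstream package follows; the environment values shown are examples, not taken from this repository.

package main

import (
	"log"
	"os"

	"golang.org/x/net/proxy"
)

func main() {
	// all_proxy with a socks5:// scheme is routed through FromURL to the
	// SOCKS5 dialer; no_proxy entries are handled by a PerHost wrapper.
	os.Setenv("all_proxy", "socks5://127.0.0.1:1080")
	os.Setenv("no_proxy", "localhost,*.internal.example.com")

	dialer := proxy.FromEnvironment()

	conn, err := dialer.Dial("tcp", "example.com:443")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}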
diff --git a/volume.go b/volume.go
index 20d203d..971021d 100644
--- a/volume.go
+++ b/volume.go
@@ -4,14 +4,12 @@ import (
dockType "github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
"golang.org/x/net/context"
-
-
)
// create a new volume
func VolumeCreate(name string) (dockType.Volume, error) {
vol := dockType.VolumeCreateRequest{
- Name: name,
+ Name: name,
}
return client.VolumeCreate(context.Background(), vol)
@@ -34,7 +32,6 @@ func VolumeExists(name string) bool {
if volume.Name == name {
return true
}
-
}
return false
}
@@ -42,4 +39,4 @@ func VolumeExists(name string) bool {
// remove an existing volume
func VolumeRemove(name string) error {
return client.VolumeRemove(context.Background(), name, true)
-}
\ No newline at end of file
+}
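
For completeness, a sketch of how the volume helpers touched in this last hunk might be combined by a caller. This assumes it sits in the same package as volume.go (so the dockType alias and the package-level Docker client are already in scope); the ensureVolume name is purely illustrative.

// ensureVolume recreates a named volume from scratch using the helpers
// shown above: check, remove if present, then create.
func ensureVolume(name string) (dockType.Volume, error) {
	if VolumeExists(name) {
		if err := VolumeRemove(name); err != nil {
			return dockType.Volume{}, err
		}
	}
	return VolumeCreate(name)
}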